/**************************************************************************
 *
 * Copyright 2019 Red Hat.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **************************************************************************/

#include "lp_bld_nir.h"
#include "lp_bld_arit.h"
#include "lp_bld_bitarit.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_gather.h"
#include "lp_bld_logic.h"
#include "lp_bld_quad.h"
#include "lp_bld_flow.h"
#include "lp_bld_intr.h"
#include "lp_bld_struct.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "nir.h"
#include "nir_deref.h"
#include "nir_search_helpers.h"


// Doing AOS (and linear) codegen?
static bool
is_aos(const struct lp_build_nir_context *bld_base)
{
   // AOS is used for vectors of uint8[16]
   return bld_base->base.type.length == 16 && bld_base->base.type.width == 8;
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list);


static LLVMValueRef
cast_type(struct lp_build_nir_context *bld_base, LLVMValueRef val,
          nir_alu_type alu_type, unsigned bit_size)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   switch (alu_type) {
   case nir_type_float:
      switch (bit_size) {
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->half_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->base.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->dbl_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_int:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->int8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->int16_bld.vec_type, "");
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->int_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->int64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint:
      switch (bit_size) {
      case 8:
         return LLVMBuildBitCast(builder, val, bld_base->uint8_bld.vec_type, "");
      case 16:
         return LLVMBuildBitCast(builder, val, bld_base->uint16_bld.vec_type, "");
      case 1:
      case 32:
         return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
      case 64:
         return LLVMBuildBitCast(builder, val, bld_base->uint64_bld.vec_type, "");
      default:
         assert(0);
         break;
      }
      break;
   case nir_type_uint32:
      return LLVMBuildBitCast(builder, val, bld_base->uint_bld.vec_type, "");
   default:
      return val;
   }
   return NULL;
}
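/*
 * Note: cast_type() only bitcasts between same-width vector types. A NIR
 * 1-bit boolean is already carried as a 32-bit mask here, which is why
 * (nir_type_uint, 1) maps to the same 32-bit uint vector type as bit_size 32.
 */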


static unsigned
glsl_sampler_to_pipe(int sampler_dim, bool is_array)
{
   unsigned pipe_target = PIPE_BUFFER;
   switch (sampler_dim) {
   case GLSL_SAMPLER_DIM_1D:
      pipe_target = is_array ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
      break;
   case GLSL_SAMPLER_DIM_2D:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_SUBPASS:
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      pipe_target = PIPE_TEXTURE_2D_ARRAY;
      break;
   case GLSL_SAMPLER_DIM_3D:
      pipe_target = PIPE_TEXTURE_3D;
      break;
   case GLSL_SAMPLER_DIM_MS:
      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      pipe_target = PIPE_TEXTURE_RECT;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      pipe_target = PIPE_BUFFER;
      break;
   default:
      break;
   }
   return pipe_target;
}


static LLVMValueRef
get_src(struct lp_build_nir_context *bld_base, nir_src src)
{
   return bld_base->ssa_defs[src.ssa->index];
}


static void
assign_ssa(struct lp_build_nir_context *bld_base, int idx, LLVMValueRef ptr)
{
   bld_base->ssa_defs[idx] = ptr;
}


static void
assign_ssa_dest(struct lp_build_nir_context *bld_base, const nir_def *ssa,
                LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS])
{
   if ((ssa->num_components == 1 || is_aos(bld_base))) {
      assign_ssa(bld_base, ssa->index, vals[0]);
   } else {
      assign_ssa(bld_base, ssa->index,
                 lp_nir_array_build_gather_values(bld_base->base.gallivm->builder,
                                                  vals, ssa->num_components));
   }
}
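/*
 * Note: multi-component SSA values are stored as an LLVM array aggregate of
 * per-channel vectors (built with lp_nir_array_build_gather_values) and read
 * back elsewhere with LLVMBuildExtractValue; single-component and AOS values
 * are stored directly.
 */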


static LLVMValueRef
fcmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size);
   LLVMValueRef result;

   if (compare != PIPE_FUNC_NOTEQUAL)
      result = lp_build_cmp_ordered(flt_bld, compare, src[0], src[1]);
   else
      result = lp_build_cmp(flt_bld, compare, src[0], src[1]);
   if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 16)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}
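/*
 * fcmp32() above produces a mask as wide as the source float type, but NIR
 * booleans are 32-bit here: 64-bit masks are truncated and 16-bit masks are
 * sign-extended, so the result is always a 32-bit 0/~0 mask.
 */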


static LLVMValueRef
icmp32(struct lp_build_nir_context *bld_base,
       enum pipe_compare_func compare,
       bool is_unsigned,
       uint32_t src_bit_size,
       LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   struct lp_build_context *i_bld =
      get_int_bld(bld_base, is_unsigned, src_bit_size);
   LLVMValueRef result = lp_build_cmp(i_bld, compare, src[0], src[1]);
   if (src_bit_size < 32)
      result = LLVMBuildSExt(builder, result, bld_base->int_bld.vec_type, "");
   else if (src_bit_size == 64)
      result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
   return result;
}


/**
 * Get a source register value for an ALU instruction.
 * This is where swizzles are handled. There should be no negation
 * or absolute value modifiers.
 * num_components indicates the number of components needed in the
 * returned array or vector.
 */
static LLVMValueRef
get_alu_src(struct lp_build_nir_context *bld_base,
            nir_alu_src src,
            unsigned num_components)
{
   assert(num_components >= 1);
   assert(num_components <= 4);

   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   const unsigned src_components = nir_src_num_components(src.src);
   assert(src_components > 0);
   LLVMValueRef value = get_src(bld_base, src.src);
   assert(value);

   /* check if swizzling needed for the src vector */
   bool need_swizzle = false;
   for (unsigned i = 0; i < src_components; ++i) {
      if (src.swizzle[i] != i) {
         need_swizzle = true;
         break;
      }
   }

   if (is_aos(bld_base) && !need_swizzle) {
      return value;
   }

   if (need_swizzle || num_components != src_components) {
      if (is_aos(bld_base) && need_swizzle) {
         // Handle swizzle for AOS
         assert(LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind);

         // swizzle vector of ((r,g,b,a), (r,g,b,a), (r,g,b,a), (r,g,b,a))
         assert(bld_base->base.type.width == 8);
         assert(bld_base->base.type.length == 16);

         // Do our own swizzle here since lp_build_swizzle_aos_n() does
         // not do what we want.
         // Ex: value = {r0,g0,b0,a0, r1,g1,b1,a1, r2,g2,b2,a2, r3,g3,b3,a3}.
         // aos swizzle = {2,1,0,3}  // swap red/blue
         // shuffles = {2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15}
         // result = {b0,g0,r0,a0, b1,g1,r1,a1, b2,g2,r2,a2, b3,g3,r3,a3}.
         LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH];
         for (unsigned i = 0; i < 16; i++) {
            unsigned chan = i % 4;
            /* apply src register swizzle */
            if (chan < num_components) {
               chan = src.swizzle[chan];
            } else {
               chan = src.swizzle[0];
            }
            /* apply aos swizzle */
            chan = lp_nir_aos_swizzle(bld_base, chan);
            shuffles[i] = lp_build_const_int32(gallivm, (i & ~3) + chan);
         }
         value = LLVMBuildShuffleVector(builder, value,
                                        LLVMGetUndef(LLVMTypeOf(value)),
                                        LLVMConstVector(shuffles, 16), "");
      } else if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractValue(gallivm->builder, value,
                                       src.swizzle[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value,
                                  value, value, value, value};
         value = lp_nir_array_build_gather_values(builder, values, num_components);
      } else {
         LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
         for (unsigned i = 0; i < num_components; i++)
            arr = LLVMBuildInsertValue(builder, arr, LLVMBuildExtractValue(builder, value, src.swizzle[i], ""), i, "");
         value = arr;
      }
   }

   return value;
}


static LLVMValueRef
emit_b2f(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result =
      LLVMBuildAnd(builder, cast_type(bld_base, src0, nir_type_int, 32),
                   LLVMBuildBitCast(builder,
                                    lp_build_const_vec(bld_base->base.gallivm,
                                                       bld_base->base.type,
                                                       1.0),
                                    bld_base->int_bld.vec_type, ""),
                   "");
   result = LLVMBuildBitCast(builder, result, bld_base->base.vec_type, "");
   switch (bitsize) {
   case 16:
      result = LLVMBuildFPTrunc(builder, result,
                                bld_base->half_bld.vec_type, "");
      break;
   case 32:
      break;
   case 64:
      result = LLVMBuildFPExt(builder, result,
                              bld_base->dbl_bld.vec_type, "");
      break;
   default:
      unreachable("unsupported bit size.");
   }
   return result;
}
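/*
 * The b2f trick above: the 32-bit boolean mask (0 or ~0) is ANDed with the
 * bit pattern of 1.0f (0x3f800000), yielding exactly 0.0f or 1.0f, which is
 * then FP-converted to the requested bit size.
 */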


static LLVMValueRef
emit_b2i(struct lp_build_nir_context *bld_base,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   LLVMValueRef result = LLVMBuildAnd(builder,
                                      cast_type(bld_base, src0, nir_type_int, 32),
                                      lp_build_const_int_vec(bld_base->base.gallivm,
                                                             bld_base->base.type, 1), "");
   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(builder, result, bld_base->int8_bld.vec_type, "");
   case 16:
      return LLVMBuildTrunc(builder, result, bld_base->int16_bld.vec_type, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(builder, result, bld_base->int64_bld.vec_type, "");
   default:
      unreachable("unsupported bit size.");
   }
}
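/* Likewise for b2i above: mask & 1 yields 0 or 1, then resized to the target width. */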


static LLVMValueRef
emit_b32csel(struct lp_build_nir_context *bld_base,
             unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
             LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef sel = cast_type(bld_base, src[0], nir_type_int, 32);
   LLVMValueRef v = lp_build_compare(bld_base->base.gallivm, bld_base->int_bld.type, PIPE_FUNC_NOTEQUAL, sel, bld_base->int_bld.zero);
   struct lp_build_context *bld = get_int_bld(bld_base, false, src_bit_size[1]);
   return lp_build_select(bld, v, src[1], src[2]);
}


static LLVMValueRef
split_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src,
                          LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}
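/*
 * Example for split_64bit() (little endian, 4-wide): a <4 x i64> source is
 * bitcast to <8 x i32>; the lo halves are lanes {0,2,4,6} and the hi halves
 * are lanes {1,3,5,7}.
 */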


static LLVMValueRef
merge_64bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->base.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}
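/*
 * Example for merge_64bit() (little endian, 4-wide): interleaving the lo
 * vector {l0,l1,l2,l3} with the hi vector {h0,h1,h2,h3} uses shuffle mask
 * {0,4,1,5,2,6,3,7}, producing {l0,h0, l1,h1, l2,h2, l3,h3} ready to be
 * bitcast to 64-bit lanes.
 */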


static LLVMValueRef
split_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef src,
            bool hi)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef shuffles[LP_MAX_VECTOR_WIDTH/32];
   LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
   int len = bld_base->base.type.length * 2;
   for (unsigned i = 0; i < bld_base->base.type.length; i++) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i * 2);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
#else
      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
#endif
   }

   src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt16TypeInContext(gallivm->context), len), "");
   return LLVMBuildShuffleVector(gallivm->builder, src,
                                 LLVMGetUndef(LLVMTypeOf(src)),
                                 LLVMConstVector(hi ? shuffles2 : shuffles,
                                                 bld_base->base.type.length),
                                 "");
}


static LLVMValueRef
merge_16bit(struct lp_build_nir_context *bld_base,
            LLVMValueRef input,
            LLVMValueRef input2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   int i;
   LLVMValueRef shuffles[2 * (LP_MAX_VECTOR_WIDTH/32)];
   int len = bld_base->int16_bld.type.length * 2;
   assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));

   for (i = 0; i < bld_base->int_bld.type.length * 2; i+=2) {
#if UTIL_ARCH_LITTLE_ENDIAN
      shuffles[i] = lp_build_const_int32(gallivm, i / 2);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
#else
      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
#endif
   }
   return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
}


static LLVMValueRef
get_signed_divisor(struct gallivm_state *gallivm,
                   struct lp_build_context *int_bld,
                   struct lp_build_context *mask_bld,
                   int src_bit_size,
                   LLVMValueRef src, LLVMValueRef divisor)
{
   LLVMBuilderRef builder = gallivm->builder;
   /* For signed divides, SIGFPE can occur if the numerator is INT_MIN
      and the divisor is -1. */
   /* set mask if numerator == INT_MIN */
   long long min_val;
   switch (src_bit_size) {
   case 8:
      min_val = INT8_MIN;
      break;
   case 16:
      min_val = INT16_MIN;
      break;
   default:
   case 32:
      min_val = INT_MIN;
      break;
   case 64:
      min_val = INT64_MIN;
      break;
   }
   LLVMValueRef div_mask2 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src,
                                         lp_build_const_int_vec(gallivm, int_bld->type, min_val));
   /* set another mask if divisor is -1 */
   LLVMValueRef div_mask3 = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, divisor,
                                         lp_build_const_int_vec(gallivm, int_bld->type, -1));
   div_mask2 = LLVMBuildAnd(builder, div_mask2, div_mask3, "");

   divisor = lp_build_select(mask_bld, div_mask2, int_bld->one, divisor);
   return divisor;
}
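/*
 * E.g. INT_MIN / -1 overflows (and raises SIGFPE on x86); lanes where both
 * conditions hold get their divisor replaced with 1, so the divide returns
 * INT_MIN instead of trapping.
 */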


static LLVMValueRef
do_int_divide(struct lp_build_nir_context *bld_base,
              bool is_unsigned, unsigned src_bit_size,
              LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);

   /* avoid divide by 0: convert a 0 divisor to -1 */
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);

   LLVMValueRef divisor = LLVMBuildOr(builder, div_mask, src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_div(int_bld, src, divisor);

   if (!is_unsigned) {
      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
      return LLVMBuildAnd(builder, not_div_mask, result, "");
   } else
      /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
       * may as well do the same for idiv */
      return LLVMBuildOr(builder, div_mask, result, "");
}
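/*
 * Net effect per lane: unsigned divide-by-zero ORs the all-ones mask into
 * the result (0xffffffff); signed divide-by-zero ANDs with the inverted
 * mask, forcing the result to 0.
 */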


static LLVMValueRef
do_int_mod(struct lp_build_nir_context *bld_base,
           bool is_unsigned, unsigned src_bit_size,
           LLVMValueRef src, LLVMValueRef src2)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
                                        mask_bld->zero);
   LLVMValueRef divisor = LLVMBuildOr(builder,
                                      div_mask,
                                      src2, "");
   if (!is_unsigned) {
      divisor = get_signed_divisor(gallivm, int_bld, mask_bld,
                                   src_bit_size, src, divisor);
   }
   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
   return LLVMBuildOr(builder, div_mask, result, "");
}
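/* mod-by-zero lanes OR in the all-ones mask, so x % 0 yields ~0 rather than trapping. */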

static LLVMValueRef
do_alu_action(struct lp_build_nir_context *bld_base,
              const nir_alu_instr *instr,
              unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS],
              LLVMValueRef src[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef result;

   switch (instr->op) {
   case nir_op_b2f16:
      result = emit_b2f(bld_base, src[0], 16);
      break;
   case nir_op_b2f32:
      result = emit_b2f(bld_base, src[0], 32);
      break;
   case nir_op_b2f64:
      result = emit_b2f(bld_base, src[0], 64);
      break;
   case nir_op_b2i8:
      result = emit_b2i(bld_base, src[0], 8);
      break;
   case nir_op_b2i16:
      result = emit_b2i(bld_base, src[0], 16);
      break;
   case nir_op_b2i32:
      result = emit_b2i(bld_base, src[0], 32);
      break;
   case nir_op_b2i64:
      result = emit_b2i(bld_base, src[0], 64);
      break;
   case nir_op_b32csel:
      result = emit_b32csel(bld_base, src_bit_size, src);
      break;
   case nir_op_bit_count:
      result = lp_build_popcount(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->int_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->int_bld.vec_type, "");
      break;
   case nir_op_bitfield_select:
      result = lp_build_xor(&bld_base->uint_bld, src[2], lp_build_and(&bld_base->uint_bld, src[0], lp_build_xor(&bld_base->uint_bld, src[1], src[2])));
      break;
   case nir_op_bitfield_reverse:
      result = lp_build_bitfield_reverse(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_f2f16:
      if (src_bit_size[0] == 64)
         src[0] = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      result = LLVMBuildFPTrunc(builder, src[0],
                                bld_base->half_bld.vec_type, "");
      break;
   case nir_op_f2f32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildFPExt(builder, src[0],
                                 bld_base->base.vec_type, "");
      else
         result = LLVMBuildFPTrunc(builder, src[0],
                                   bld_base->base.vec_type, "");
      break;
   case nir_op_f2f64:
      result = LLVMBuildFPExt(builder, src[0],
                              bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64: {
      nir_alu_type dst_type = nir_op_infos[instr->op].output_type;
      bool is_unsigned = nir_alu_type_get_base_type(dst_type) == nir_type_uint;
      LLVMTypeRef int_type = get_int_bld(bld_base, is_unsigned, nir_alu_type_get_type_size(dst_type))->vec_type;

      char name[64];
      char tmp[64];
      char intrinsic[64];
      snprintf(name, sizeof(name), "llvm.fpto%ci.sat", is_unsigned ? 'u' : 's');
      lp_format_intrinsic(tmp, 64, name, int_type);
      lp_format_intrinsic(intrinsic, 64, tmp, LLVMTypeOf(src[0]));
      result = lp_build_intrinsic_unary(builder, intrinsic, int_type, src[0]);
      break;
   }
   case nir_op_fabs:
      result = lp_build_abs(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fadd:
      result = lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fceil:
      result = lp_build_ceil(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fcos:
      result = lp_build_cos(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fdiv:
      result = lp_build_div(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_feq32:
      result = fcmp32(bld_base, PIPE_FUNC_EQUAL, src_bit_size[0], src);
      break;
   case nir_op_fexp2:
      result = lp_build_exp2(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffloor:
      result = lp_build_floor(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ffma:
      result = lp_build_fmuladd(builder, src[0], src[1], src[2]);
      break;
   case nir_op_ffract: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      LLVMValueRef tmp = lp_build_floor(flt_bld, src[0]);
      result = lp_build_sub(flt_bld, src[0], tmp);
      break;
   }
   case nir_op_fge:
   case nir_op_fge32:
      result = fcmp32(bld_base, PIPE_FUNC_GEQUAL, src_bit_size[0], src);
      break;
   case nir_op_find_lsb: {
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      result = lp_build_cttz(int_bld, src[0]);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else if (src_bit_size[0] > 32)
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_fisfinite32:
      unreachable("Should have been lowered in nir_opt_algebraic_late.");
   case nir_op_flog2:
      result = lp_build_log2_safe(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_flt:
   case nir_op_flt32:
      result = fcmp32(bld_base, PIPE_FUNC_LESS, src_bit_size[0], src);
      break;
   case nir_op_fmax:
   case nir_op_fmin: {
      enum gallivm_nan_behavior minmax_nan;
      int first = 0;

      /* If one of the sources is known to be a number (i.e., not NaN), then
       * better code can be generated by passing that information along.
       */
      if (is_a_number(bld_base->range_ht, instr, 1,
                      0 /* unused num_components */,
                      NULL /* unused swizzle */)) {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else if (is_a_number(bld_base->range_ht, instr, 0,
                             0 /* unused num_components */,
                             NULL /* unused swizzle */)) {
         first = 1;
         minmax_nan = GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN;
      } else {
         minmax_nan = GALLIVM_NAN_RETURN_OTHER;
      }

      if (instr->op == nir_op_fmin) {
         result = lp_build_min_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      } else {
         result = lp_build_max_ext(get_flt_bld(bld_base, src_bit_size[0]),
                                   src[first], src[1 - first], minmax_nan);
      }
      break;
   }
   case nir_op_fmod: {
      struct lp_build_context *flt_bld = get_flt_bld(bld_base, src_bit_size[0]);
      result = lp_build_div(flt_bld, src[0], src[1]);
      result = lp_build_floor(flt_bld, result);
      result = lp_build_mul(flt_bld, src[1], result);
      result = lp_build_sub(flt_bld, src[0], result);
      break;
   }
   case nir_op_fmul:
      result = lp_build_mul(get_flt_bld(bld_base, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src);
      break;
   case nir_op_fneg:
      result = lp_build_negate(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fpow:
      result = lp_build_pow(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_frcp:
      result = lp_build_rcp(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fround_even:
      if (src_bit_size[0] == 16) {
         struct lp_build_context *bld = get_flt_bld(bld_base, 16);
         char intrinsic[64];
         lp_format_intrinsic(intrinsic, 64, "llvm.roundeven", bld->vec_type);
         result = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, src[0]);
      } else {
         result = lp_build_round(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      }
      break;
   case nir_op_frsq:
      result = lp_build_rsqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsat:
      result = lp_build_clamp_zero_one_nanzero(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsign:
      result = lp_build_sgn(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsin:
      result = lp_build_sin(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_fsqrt:
      result = lp_build_sqrt(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_ftrunc:
      result = lp_build_trunc(get_flt_bld(bld_base, src_bit_size[0]), src[0]);
      break;
   case nir_op_i2f16:
      result = LLVMBuildSIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_i2f32:
      result = lp_build_int_to_float(&bld_base->base, src[0]);
      break;
   case nir_op_i2f64:
      result = lp_build_int_to_float(&bld_base->dbl_bld, src[0]);
      break;
   case nir_op_i2i8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->int8_bld.vec_type, "");
      break;
   case nir_op_i2i16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildSExt(builder, src[0], bld_base->int16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int16_bld.vec_type, "");
      break;
   case nir_op_i2i32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildSExt(builder, src[0], bld_base->int_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->int_bld.vec_type, "");
      break;
   case nir_op_i2i64:
      result = LLVMBuildSExt(builder, src[0], bld_base->int64_bld.vec_type, "");
      break;
   case nir_op_iabs:
      result = lp_build_abs(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_iadd:
      result = lp_build_add(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_iand:
      result = lp_build_and(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_idiv:
      result = do_int_divide(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = icmp32(bld_base, PIPE_FUNC_EQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ige32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ilt32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, false, src_bit_size[0], src);
      break;
   case nir_op_imax:
      result = lp_build_max(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imin:
      result = lp_build_min(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_imul:
   case nir_op_imul24:
      result = lp_build_mul(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_imul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, false, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ine32:
      result = icmp32(bld_base, PIPE_FUNC_NOTEQUAL, false, src_bit_size[0], src);
      break;
   case nir_op_ineg:
      result = lp_build_negate(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_inot:
      result = lp_build_not(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_ior:
      result = lp_build_or(get_int_bld(bld_base, false, src_bit_size[0]),
                           src[0], src[1]);
      break;
   case nir_op_imod:
   case nir_op_irem:
      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ishl: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shl(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_ishr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      struct lp_build_context *int_bld = get_int_bld(bld_base, false, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(int_bld, src[0], src[1]);
      break;
   }
   case nir_op_isign:
      result = lp_build_sgn(get_int_bld(bld_base, false, src_bit_size[0]), src[0]);
      break;
   case nir_op_isub:
      result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_ixor:
      result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                            src[0], src[1]);
      break;
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_unpack_64_2x32_split_x:
      result = split_64bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_64_2x32_split_y:
      result = split_64bit(bld_base, src[0], true);
      break;

   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = merge_16bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->base.vec_type, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x:
      result = split_16bit(bld_base, src[0], false);
      break;
   case nir_op_unpack_32_2x16_split_y:
      result = split_16bit(bld_base, src[0], true);
      break;
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = merge_64bit(bld_base, src[0], src[1]);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint64_bld.vec_type, "");
      break;
   }
   case nir_op_pack_32_4x8_split: {
      LLVMValueRef tmp1 = merge_16bit(bld_base, src[0], src[1]);
      LLVMValueRef tmp2 = merge_16bit(bld_base, src[2], src[3]);
      tmp1 = LLVMBuildBitCast(builder, tmp1, bld_base->uint16_bld.vec_type, "");
      tmp2 = LLVMBuildBitCast(builder, tmp2, bld_base->uint16_bld.vec_type, "");
      LLVMValueRef tmp = merge_16bit(bld_base, tmp1, tmp2);
      result = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_u2f16:
      result = LLVMBuildUIToFP(builder, src[0],
                               bld_base->half_bld.vec_type, "");
      break;
   case nir_op_u2f32:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->base.vec_type, "");
      break;
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(builder, src[0], bld_base->dbl_bld.vec_type, "");
      break;
   case nir_op_u2u8:
      result = LLVMBuildTrunc(builder, src[0], bld_base->uint8_bld.vec_type, "");
      break;
   case nir_op_u2u16:
      if (src_bit_size[0] < 16)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint16_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint16_bld.vec_type, "");
      break;
   case nir_op_u2u32:
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, src[0], bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, src[0], bld_base->uint_bld.vec_type, "");
      break;
   case nir_op_u2u64:
      result = LLVMBuildZExt(builder, src[0], bld_base->uint64_bld.vec_type, "");
      break;
   case nir_op_udiv:
      result = do_int_divide(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_ufind_msb: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      result = lp_build_ctlz(uint_bld, src[0]);
      result = lp_build_sub(uint_bld, lp_build_const_int_vec(gallivm, uint_bld->type, src_bit_size[0] - 1), result);
      if (src_bit_size[0] < 32)
         result = LLVMBuildZExt(builder, result, bld_base->uint_bld.vec_type, "");
      else
         result = LLVMBuildTrunc(builder, result, bld_base->uint_bld.vec_type, "");
      break;
   }
   case nir_op_uge32:
      result = icmp32(bld_base, PIPE_FUNC_GEQUAL, true, src_bit_size[0], src);
      break;
   case nir_op_ult32:
      result = icmp32(bld_base, PIPE_FUNC_LESS, true, src_bit_size[0], src);
      break;
   case nir_op_umax:
      result = lp_build_max(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umin:
      result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
      break;
   case nir_op_umod:
      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
      break;
   case nir_op_umul_high: {
      LLVMValueRef hi_bits;
      lp_build_mul_32_lohi(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1], &hi_bits);
      result = hi_bits;
      break;
   }
   case nir_op_ushr: {
      struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
      if (src_bit_size[0] == 64)
         src[1] = LLVMBuildZExt(builder, src[1], uint_bld->vec_type, "");
      if (src_bit_size[0] < 32)
         src[1] = LLVMBuildTrunc(builder, src[1], uint_bld->vec_type, "");
      src[1] = lp_build_and(uint_bld, src[1], lp_build_const_int_vec(gallivm, uint_bld->type, (src_bit_size[0] - 1)));
      result = lp_build_shr(uint_bld, src[0], src[1]);
      break;
   }
   case nir_op_bcsel: {
      LLVMTypeRef src1_type = LLVMTypeOf(src[1]);
      LLVMTypeRef src2_type = LLVMTypeOf(src[2]);

      if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
          LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
         src[2] = LLVMBuildIntToPtr(builder, src[2], src1_type, "");
      } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
                 LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
         src[1] = LLVMBuildIntToPtr(builder, src[1], src2_type, "");
      }

      for (int i = 1; i <= 2; i++) {
         LLVMTypeRef type = LLVMTypeOf(src[i]);
         if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
            break;
         src[i] = LLVMBuildBitCast(builder, src[i], get_int_bld(bld_base, true, src_bit_size[i])->vec_type, "");
      }
      return LLVMBuildSelect(builder, src[0], src[1], src[2], "");
   }
   default:
      assert(0);
      break;
   }
   return result;
}


static void
visit_alu(struct lp_build_nir_context *bld_base,
          const nir_alu_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
   unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
   const unsigned num_components = instr->def.num_components;
   unsigned src_components;

   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_amd:
      src_components = 3;
      break;
   case nir_op_fsum2:
   case nir_op_fsum3:
   case nir_op_fsum4:
      src_components = nir_op_infos[instr->op].input_sizes[0];
      break;
   default:
      src_components = num_components;
      break;
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      src[i] = get_alu_src(bld_base, instr->src[i], src_components);
      src_bit_size[i] = nir_src_bit_size(instr->src[i].src);
   }

   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   if (instr->op == nir_op_vec4 ||
       instr->op == nir_op_vec3 ||
       instr->op == nir_op_vec2 ||
       instr->op == nir_op_vec8 ||
       instr->op == nir_op_vec16) {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         result[i] = cast_type(bld_base, src[i],
                               nir_op_infos[instr->op].input_types[i],
                               src_bit_size[i]);
      }
   } else if (instr->op == nir_op_fsum4 ||
              instr->op == nir_op_fsum3 ||
              instr->op == nir_op_fsum2) {
      for (unsigned c = 0; c < nir_op_infos[instr->op].input_sizes[0]; c++) {
         LLVMValueRef temp_chan = LLVMBuildExtractValue(gallivm->builder,
                                                        src[0], c, "");
         temp_chan = cast_type(bld_base, temp_chan,
                               nir_op_infos[instr->op].input_types[0],
                               src_bit_size[0]);
         result[0] = (c == 0) ? temp_chan
            : lp_build_add(get_flt_bld(bld_base, src_bit_size[0]),
                           result[0], temp_chan);
      }
   } else if (is_aos(bld_base)) {
      result[0] = do_alu_action(bld_base, instr, src_bit_size, src);
   } else {
      /* Loop for R,G,B,A channels */
      for (unsigned c = 0; c < num_components; c++) {
         LLVMValueRef src_chan[NIR_MAX_VEC_COMPONENTS];

         /* Loop over instruction operands */
         for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
            if (num_components > 1) {
               src_chan[i] = LLVMBuildExtractValue(gallivm->builder,
                                                   src[i], c, "");
            } else {
               src_chan[i] = src[i];
            }
            src_chan[i] = cast_type(bld_base, src_chan[i],
                                    nir_op_infos[instr->op].input_types[i],
                                    src_bit_size[i]);
         }
         result[c] = do_alu_action(bld_base, instr, src_bit_size, src_chan);
         result[c] = cast_type(bld_base, result[c],
                               nir_op_infos[instr->op].output_type,
                               instr->def.bit_size);
      }
   }
   assign_ssa_dest(bld_base, &instr->def, result);
}
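/*
 * In the general (SOA) path above, a vector ALU op is emitted once per
 * destination channel: each operand channel is extracted from its array
 * value, cast to the op's input type, run through do_alu_action(), and the
 * per-channel results are gathered back together by assign_ssa_dest().
 */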


static void
visit_load_const(struct lp_build_nir_context *bld_base,
                 const nir_load_const_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
   bld_base->load_const(bld_base, instr, result);
   assign_ssa_dest(bld_base, &instr->def, result);
}


static void
get_deref_offset(struct lp_build_nir_context *bld_base, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(bld_base, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact && nir_src_is_const(instr->arr.index)) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += nir_src_comp_as_int(path.path[idx_lvl]->arr.index, 0) * size;
         } else {
            LLVMValueRef idx_src = get_src(bld_base, path.path[idx_lvl]->arr.index);
            idx_src = cast_type(bld_base, idx_src, nir_type_uint, 32);
            LLVMValueRef array_off = lp_build_mul(&bld_base->uint_bld, lp_build_const_int_vec(bld_base->base.gallivm, bld_base->base.type, size),
                                                  idx_src);
            if (offset)
               offset = lp_build_add(&bld_base->uint_bld, offset, array_off);
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(builder, offset,
                            lp_build_const_int_vec(bld_base->base.gallivm, bld_base->uint_bld.type, const_offset),
                            "");
   *const_out = const_offset;
   *indir_out = offset;
}
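/*
 * The walk above splits a deref chain into a constant slot offset plus an
 * optional indirect part, both counted in attribute slots. E.g. for a
 * hypothetical input "mat4 m[2]" accessed as m[i][2]: the mat4 element is
 * 4 slots, so indir_out = i * 4 and const_out = 2.
 */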


static void
visit_load_input(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr,
                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   nir_variable var = {0};
   var.data.location = nir_intrinsic_io_semantics(instr).location;
   var.data.driver_location = nir_intrinsic_base(instr);
   var.data.location_frac = nir_intrinsic_component(instr);

   unsigned nc = instr->def.num_components;
   unsigned bit_size = instr->def.bit_size;

   nir_src offset = *nir_get_io_offset_src(instr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);
   LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;

   bld_base->load_var(bld_base, nir_var_shader_in, nc, bit_size, &var, 0, NULL, 0, indir_index, result);
}


static void
visit_store_output(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr)
{
   nir_variable var = {0};
   var.data.location = nir_intrinsic_io_semantics(instr).location;
   var.data.driver_location = nir_intrinsic_base(instr);
   var.data.location_frac = nir_intrinsic_component(instr);

   unsigned mask = nir_intrinsic_write_mask(instr);

   unsigned bit_size = nir_src_bit_size(instr->src[0]);
   LLVMValueRef src = get_src(bld_base, instr->src[0]);

   nir_src offset = *nir_get_io_offset_src(instr);
   bool indirect = !nir_src_is_const(offset);
   if (!indirect)
      assert(nir_src_as_uint(offset) == 0);
   LLVMValueRef indir_index = indirect ? get_src(bld_base, offset) : NULL;

   if (mask == 0x1 && LLVMGetTypeKind(LLVMTypeOf(src)) == LLVMArrayTypeKind) {
      src = LLVMBuildExtractValue(bld_base->base.gallivm->builder,
                                  src, 0, "");
   }

   bld_base->store_var(bld_base, nir_var_shader_out, util_last_bit(mask),
                       bit_size, &var, mask, NULL, 0, indir_index, src);
}


static void
visit_load_reg(struct lp_build_nir_context *bld_base,
               nir_intrinsic_instr *instr,
               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;

   nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[0].ssa);
   unsigned base = nir_intrinsic_base(instr);

   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;

   unsigned bit_size = nir_intrinsic_bit_size(decl);
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);

   LLVMValueRef indir_src = NULL;
   if (instr->intrinsic == nir_intrinsic_load_reg_indirect) {
      indir_src = cast_type(bld_base, get_src(bld_base, instr->src[1]),
                            nir_type_uint, 32);
   }

   LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage);

   if (!is_aos(bld_base) && instr->def.num_components > 1) {
      for (unsigned i = 0; i < instr->def.num_components; i++)
         result[i] = LLVMBuildExtractValue(builder, val, i, "");
   } else {
      result[0] = val;
   }
}


static void
visit_store_reg(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;

   nir_intrinsic_instr *decl = nir_reg_get_decl(instr->src[1].ssa);
   unsigned base = nir_intrinsic_base(instr);
   unsigned write_mask = nir_intrinsic_write_mask(instr);
   assert(write_mask != 0x0);

   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef vals[NIR_MAX_VEC_COMPONENTS] = { NULL };
   if (!is_aos(bld_base) && nir_src_num_components(instr->src[0]) > 1) {
      for (unsigned i = 0; i < nir_src_num_components(instr->src[0]); i++)
         vals[i] = LLVMBuildExtractValue(builder, val, i, "");
   } else {
      vals[0] = val;
   }

   struct hash_entry *entry = _mesa_hash_table_search(bld_base->regs, decl);
   LLVMValueRef reg_storage = (LLVMValueRef)entry->data;

   unsigned bit_size = nir_intrinsic_bit_size(decl);
   struct lp_build_context *reg_bld = get_int_bld(bld_base, true, bit_size);

   LLVMValueRef indir_src = NULL;
   if (instr->intrinsic == nir_intrinsic_store_reg_indirect) {
      indir_src = cast_type(bld_base, get_src(bld_base, instr->src[2]),
                            nir_type_uint, 32);
   }

   bld_base->store_reg(bld_base, reg_bld, decl, write_mask, base,
                       indir_src, reg_storage, vals);
}


static bool
compact_array_index_oob(struct lp_build_nir_context *bld_base, nir_variable *var, const uint32_t index)
{
   const struct glsl_type *type = var->type;
   if (nir_is_arrayed_io(var, bld_base->shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }
   return index >= glsl_get_length(type);
}
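/*
 * E.g. for a compact output like gl_TessLevelOuter (float[4]), any constant
 * index >= 4 is out of bounds; for arrayed (per-vertex) I/O the outer
 * per-vertex array is peeled off before checking.
 */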
1371
1372 static void
visit_load_var(struct lp_build_nir_context * bld_base,nir_intrinsic_instr * instr,LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])1373 visit_load_var(struct lp_build_nir_context *bld_base,
1374 nir_intrinsic_instr *instr,
1375 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
1376 {
1377 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1378 nir_variable *var = nir_deref_instr_get_variable(deref);
1379 assert(util_bitcount(deref->modes) == 1);
1380 nir_variable_mode mode = deref->modes;
1381 unsigned const_index = 0;
1382 LLVMValueRef indir_index = NULL;
1383 LLVMValueRef indir_vertex_index = NULL;
1384 unsigned vertex_index = 0;
1385 unsigned nc = instr->def.num_components;
1386 unsigned bit_size = instr->def.bit_size;
1387 if (var) {
1388 bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&
1389 var->data.mode == nir_var_shader_in;
1390 bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
1391 var->data.mode == nir_var_shader_in;
1392 bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1393 var->data.mode == nir_var_shader_in;
1394 bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1395 var->data.mode == nir_var_shader_out && !var->data.patch;
1396 bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
1397 var->data.mode == nir_var_shader_in && !var->data.patch;
1398
1399 mode = var->data.mode;
1400
1401 get_deref_offset(bld_base, deref, vs_in,
1402 gs_in ? &vertex_index : NULL,
1403 (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
1404 &const_index, &indir_index);
1405
1406 /* Return undef for loads definitely outside of the array bounds
1407 * (tcs-tes-levels-out-of-bounds-read.shader_test).
1408 */
1409 if (var->data.compact && compact_array_index_oob(bld_base, var, const_index)) {
1410 struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
1411 instr->def.bit_size);
1412 for (int i = 0; i < instr->def.num_components; i++)
1413 result[i] = LLVMGetUndef(undef_bld->vec_type);
1414 return;
1415 }
1416 }
1417 bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index,
1418 indir_vertex_index, const_index, indir_index, result);
1419 }
1420
1421
1422 static void
visit_store_var(struct lp_build_nir_context * bld_base,nir_intrinsic_instr * instr)1423 visit_store_var(struct lp_build_nir_context *bld_base,
1424 nir_intrinsic_instr *instr)
1425 {
1426 nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
1427 nir_variable *var = nir_deref_instr_get_variable(deref);
1428 assert(util_bitcount(deref->modes) == 1);
1429 nir_variable_mode mode = deref->modes;
1430 int writemask = instr->const_index[0];
1431 unsigned bit_size = nir_src_bit_size(instr->src[1]);
1432 LLVMValueRef src = get_src(bld_base, instr->src[1]);
1433 unsigned const_index = 0;
1434 LLVMValueRef indir_index = NULL, indir_vertex_index = NULL;
1435 if (var) {
1436 bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
1437 var->data.mode == nir_var_shader_out && !var->data.patch;
1438 bool mesh_out = bld_base->shader->info.stage == MESA_SHADER_MESH &&
1439 var->data.mode == nir_var_shader_out;
1440 get_deref_offset(bld_base, deref, false, NULL,
1441 (tcs_out || mesh_out) ? &indir_vertex_index : NULL,
1442 &const_index, &indir_index);
1443
1444 /* Skip stores definitely outside of the array bounds
1445 * (tcs-tes-levels-out-of-bounds-write.shader_test).
1446 */
1447 if (var->data.compact && compact_array_index_oob(bld_base, var, const_index))
1448 return;
1449 }
1450 bld_base->store_var(bld_base, mode, instr->num_components, bit_size,
1451 var, writemask, indir_vertex_index, const_index,
1452 indir_index, src);
1453 }
1454
1455
static void
visit_load_ubo(struct lp_build_nir_context *bld_base,
               nir_intrinsic_instr *instr,
               LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);

   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);

   if (nir_src_num_components(instr->src[0]) == 1)
      idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");

   bld_base->load_ubo(bld_base, instr->def.num_components,
                      instr->def.bit_size,
                      offset_is_uniform, idx, offset, result);
}


static void
visit_load_push_constant(struct lp_build_nir_context *bld_base,
                         nir_intrinsic_instr *instr,
                         LLVMValueRef result[4])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);

   bld_base->load_ubo(bld_base, instr->def.num_components,
                      instr->def.bit_size,
                      offset_is_uniform, idx, offset, result);
}


static void
visit_load_ssbo(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr,
                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   if (nir_src_num_components(instr->src[0]) == 1)
      idx = cast_type(bld_base, idx, nir_type_uint, 32);

   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bool index_and_offset_are_uniform =
      nir_src_is_always_uniform(instr->src[0]) &&
      nir_src_is_always_uniform(instr->src[1]);
   bld_base->load_mem(bld_base, instr->def.num_components,
                      instr->def.bit_size,
                      index_and_offset_are_uniform, false, idx, offset, result);
}


static void
visit_store_ssbo(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);

   LLVMValueRef idx = get_src(bld_base, instr->src[1]);
   if (nir_src_num_components(instr->src[1]) == 1)
      idx = cast_type(bld_base, idx, nir_type_uint, 32);

   LLVMValueRef offset = get_src(bld_base, instr->src[2]);
   bool index_and_offset_are_uniform =
      nir_src_is_always_uniform(instr->src[1]) &&
      nir_src_is_always_uniform(instr->src[2]);
   int writemask = instr->const_index[0];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize,
                       index_and_offset_are_uniform, false, idx, offset, val);
}


static void
visit_get_ssbo_size(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr,
                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   if (nir_src_num_components(instr->src[0]) == 1)
      idx = cast_type(bld_base, idx, nir_type_uint, 32);

   result[0] = bld_base->get_ssbo_size(bld_base, idx);
}


static void
visit_ssbo_atomic(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *instr,
                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef idx = get_src(bld_base, instr->src[0]);
   if (nir_src_num_components(instr->src[0]) == 1)
      idx = cast_type(bld_base, idx, nir_type_uint, 32);

   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   LLVMValueRef val = get_src(bld_base, instr->src[2]);
   LLVMValueRef val2 = NULL;
   int bitsize = nir_src_bit_size(instr->src[2]);
   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_swap)
      val2 = get_src(bld_base, instr->src[3]);

   bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, idx,
                        offset, val, val2, &result[0]);
}

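/* Resolve the image operand of an intrinsic: a one-component source is an
 * image index (constant, or a dynamic index offset), while a multi-component
 * source is a bindless resource handle.
 */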
static void
img_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_img_params *params, nir_src src)
{
   if (nir_src_num_components(src) == 1) {
      if (nir_src_is_const(src))
         params->image_index = nir_src_as_int(src);
      else
         params->image_index_offset = get_src(bld_base, src);

      return;
   }

   params->resource = get_src(bld_base, src);
}

static void
sampler_size_params_init_resource(struct lp_build_nir_context *bld_base, struct lp_sampler_size_query_params *params, nir_src src)
{
   if (nir_src_num_components(src) == 1) {
      if (nir_src_is_const(src))
         params->texture_unit = nir_src_as_int(src);
      else
         params->texture_unit_offset = get_src(bld_base, src);

      return;
   }

   params->resource = get_src(bld_base, src);
}

static void
visit_load_image(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr,
                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef coords[5];
   struct lp_img_params params = { 0 };

   params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
                                        nir_intrinsic_image_array(instr));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];

   params.coords = coords;
   params.outdata = result;
   lp_img_op_from_intrinsic(&params, instr);
   if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
       nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS)
      params.ms_index = cast_type(bld_base, get_src(bld_base, instr->src[2]),
                                  nir_type_uint, 32);

   img_params_init_resource(bld_base, &params, instr->src[0]);
   params.format = nir_intrinsic_format(instr);

   bld_base->image_op(bld_base, &params);
}


static void
visit_store_image(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *instr)
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];
   struct lp_img_params params = { 0 };

   params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr), nir_intrinsic_image_array(instr));
   for (unsigned i = 0; i < 4; i++)
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   params.coords = coords;

   params.format = nir_intrinsic_format(instr);

   const struct util_format_description *desc = util_format_description(params.format);
   bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;

   for (unsigned i = 0; i < 4; i++) {
      params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");

      if (integer)
         params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->int_bld.vec_type, "");
      else
         params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
   }
   if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);
   params.img_op = LP_IMG_STORE;

   img_params_init_resource(bld_base, &params, instr->src[0]);

   if (params.target == PIPE_TEXTURE_1D_ARRAY)
      coords[2] = coords[1];
   bld_base->image_op(bld_base, &params);
}

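/* Map a NIR atomic opcode onto the matching LLVM atomicrmw operation. Float
 * min/max atomics are only available with LLVM 15 and newer, hence the
 * version guard.
 */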
LLVMAtomicRMWBinOp
lp_translate_atomic_op(nir_atomic_op op)
{
   switch (op) {
   case nir_atomic_op_iadd: return LLVMAtomicRMWBinOpAdd;
   case nir_atomic_op_xchg: return LLVMAtomicRMWBinOpXchg;
   case nir_atomic_op_iand: return LLVMAtomicRMWBinOpAnd;
   case nir_atomic_op_ior: return LLVMAtomicRMWBinOpOr;
   case nir_atomic_op_ixor: return LLVMAtomicRMWBinOpXor;
   case nir_atomic_op_umin: return LLVMAtomicRMWBinOpUMin;
   case nir_atomic_op_umax: return LLVMAtomicRMWBinOpUMax;
   case nir_atomic_op_imin: return LLVMAtomicRMWBinOpMin;
   case nir_atomic_op_imax: return LLVMAtomicRMWBinOpMax;
   case nir_atomic_op_fadd: return LLVMAtomicRMWBinOpFAdd;
#if LLVM_VERSION_MAJOR >= 15
   case nir_atomic_op_fmin: return LLVMAtomicRMWBinOpFMin;
   case nir_atomic_op_fmax: return LLVMAtomicRMWBinOpFMax;
#endif
   default: unreachable("Unexpected atomic");
   }
}

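/* Classify an image intrinsic into an lp_img_params op: plain/bindless loads
 * and stores, sparse loads, compare-and-swap, and read-modify-write atomics
 * (which also record the translated LLVM atomic op).
 */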
void
lp_img_op_from_intrinsic(struct lp_img_params *params, nir_intrinsic_instr *instr)
{
   if (instr->intrinsic == nir_intrinsic_image_load ||
       instr->intrinsic == nir_intrinsic_bindless_image_load) {
      params->img_op = LP_IMG_LOAD;
      return;
   }

   if (instr->intrinsic == nir_intrinsic_bindless_image_sparse_load) {
      params->img_op = LP_IMG_LOAD_SPARSE;
      return;
   }

   if (instr->intrinsic == nir_intrinsic_image_store ||
       instr->intrinsic == nir_intrinsic_bindless_image_store) {
      params->img_op = LP_IMG_STORE;
      return;
   }

   if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
       instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
      params->img_op = LP_IMG_ATOMIC_CAS;
      return;
   }

   if (instr->intrinsic == nir_intrinsic_image_atomic ||
       instr->intrinsic == nir_intrinsic_bindless_image_atomic) {
      params->img_op = LP_IMG_ATOMIC;
      params->op = lp_translate_atomic_op(nir_intrinsic_atomic_op(instr));
   } else {
      params->img_op = -1;
   }
}


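/* Image atomics: input values are bitcast to the image's channel type
 * (pure-integer vs. float formats) before the backend's image_op runs; the
 * swap variants carry the comparison value in indata2.
 */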
static void
visit_atomic_image(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr,
                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_img_params params = { 0 };
   LLVMValueRef coord_val = get_src(bld_base, instr->src[1]);
   LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
   LLVMValueRef coords[5];

   params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
                                        nir_intrinsic_image_array(instr));
   for (unsigned i = 0; i < 4; i++) {
      coords[i] = LLVMBuildExtractValue(builder, coord_val, i, "");
   }
   if (params.target == PIPE_TEXTURE_1D_ARRAY) {
      coords[2] = coords[1];
   }

   params.coords = coords;

   params.format = nir_intrinsic_format(instr);

   const struct util_format_description *desc = util_format_description(params.format);
   bool integer = desc->channel[util_format_get_first_non_void_channel(params.format)].pure_integer;

   if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS)
      params.ms_index = get_src(bld_base, instr->src[2]);

   if (instr->intrinsic == nir_intrinsic_image_atomic_swap ||
       instr->intrinsic == nir_intrinsic_bindless_image_atomic_swap) {
      LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
      params.indata[0] = in_val;
      params.indata2[0] = cas_val;

      if (integer)
         params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->int_bld.vec_type, "");
      else
         params.indata2[0] = LLVMBuildBitCast(builder, params.indata2[0], bld_base->base.vec_type, "");
   } else {
      params.indata[0] = in_val;
   }

   if (integer)
      params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->int_bld.vec_type, "");
   else
      params.indata[0] = LLVMBuildBitCast(builder, params.indata[0], bld_base->base.vec_type, "");

   params.outdata = result;

   lp_img_op_from_intrinsic(&params, instr);

   img_params_init_resource(bld_base, &params, instr->src[0]);

   bld_base->image_op(bld_base, &params);
}


static void
visit_image_size(struct lp_build_nir_context *bld_base,
                 nir_intrinsic_instr *instr,
                 LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct lp_sampler_size_query_params params = { 0 };

   sampler_size_params_init_resource(bld_base, &params, instr->src[0]);

   params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
                                        nir_intrinsic_image_array(instr));
   params.sizes_out = result;
   params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
               nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
   params.format = nir_intrinsic_format(instr);

   bld_base->image_size(bld_base, &params);
}


static void
visit_image_samples(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr,
                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct lp_sampler_size_query_params params = { 0 };

   sampler_size_params_init_resource(bld_base, &params, instr->src[0]);

   params.target = glsl_sampler_to_pipe(nir_intrinsic_image_dim(instr),
                                        nir_intrinsic_image_array(instr));
   params.sizes_out = result;
   params.ms = nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_MS ||
               nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_SUBPASS_MS;
   params.samples_only = true;

   params.format = nir_intrinsic_format(instr);

   bld_base->image_size(bld_base, &params);
}


static void
visit_shared_load(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *instr,
                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
   bld_base->load_mem(bld_base, instr->def.num_components,
                      instr->def.bit_size,
                      offset_is_uniform, false, NULL, offset, result);
}


static void
visit_shared_store(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize,
                       offset_is_uniform, false, NULL, offset, val);
}


static void
visit_shared_atomic(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr,
                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int bitsize = nir_src_bit_size(instr->src[1]);
   if (instr->intrinsic == nir_intrinsic_shared_atomic_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, false, NULL,
                        offset, val, val2, &result[0]);
}


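/* nir_intrinsic_barrier carries both memory and execution semantics: any
 * memory semantics become a sequentially-consistent LLVM fence, and a
 * non-SCOPE_NONE execution scope invokes the backend's workgroup barrier.
 */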
static void
visit_barrier(struct lp_build_nir_context *bld_base,
              nir_intrinsic_instr *instr)
{
   LLVMBuilderRef builder = bld_base->base.gallivm->builder;
   mesa_scope exec_scope = nir_intrinsic_execution_scope(instr);
   unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);

   if (nir_semantics) {
      LLVMAtomicOrdering ordering = LLVMAtomicOrderingSequentiallyConsistent;
      LLVMBuildFence(builder, ordering, false, "");
   }
   if (exec_scope != SCOPE_NONE)
      bld_base->barrier(bld_base);
}


static void
visit_discard(struct lp_build_nir_context *bld_base,
              nir_intrinsic_instr *instr)
{
   LLVMValueRef cond = NULL;
   if (instr->intrinsic == nir_intrinsic_terminate_if) {
      cond = get_src(bld_base, instr->src[0]);
      cond = cast_type(bld_base, cond, nir_type_int, 32);
   }
   bld_base->discard(bld_base, cond);
}


static void
visit_load_kernel_input(struct lp_build_nir_context *bld_base,
                        nir_intrinsic_instr *instr,
                        LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
   bld_base->load_kernel_arg(bld_base, instr->def.num_components,
                             instr->def.bit_size,
                             nir_src_bit_size(instr->src[0]),
                             offset_is_uniform, offset, result);
}


static void
visit_load_global(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *instr,
                  LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
   bld_base->load_global(bld_base, instr->def.num_components,
                         instr->def.bit_size,
                         nir_src_bit_size(instr->src[0]),
                         offset_is_uniform, addr, result);
}


static void
visit_store_global(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   LLVMValueRef addr = get_src(bld_base, instr->src[1]);
   int addr_bitsize = nir_src_bit_size(instr->src[1]);
   int writemask = instr->const_index[0];
   bld_base->store_global(bld_base, writemask, nc, bitsize,
                          addr_bitsize, addr, val);
}


static void
visit_global_atomic(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr,
                    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef addr = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int addr_bitsize = nir_src_bit_size(instr->src[0]);
   int val_bitsize = nir_src_bit_size(instr->src[1]);
   if (instr->intrinsic == nir_intrinsic_global_atomic_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_global(bld_base, nir_intrinsic_atomic_op(instr),
                           addr_bitsize, val_bitsize, addr, val, val2,
                           &result[0]);
}

#if LLVM_VERSION_MAJOR >= 10
static void visit_shuffle(struct lp_build_nir_context *bld_base,
                          nir_intrinsic_instr *instr,
                          LLVMValueRef dst[4])
{
   LLVMValueRef src = get_src(bld_base, instr->src[0]);
   src = cast_type(bld_base, src, nir_type_int,
                   nir_src_bit_size(instr->src[0]));
   LLVMValueRef index = get_src(bld_base, instr->src[1]);
   index = cast_type(bld_base, index, nir_type_uint,
                     nir_src_bit_size(instr->src[1]));

   bld_base->shuffle(bld_base, src, index, instr, dst);
}
#endif


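/* interp_deref_at_{centroid,offset,sample}: fragment input interpolation at
 * a non-default location. at_offset passes two float offsets, at_sample a
 * single integer sample index.
 */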
static void
visit_interp(struct lp_build_nir_context *bld_base,
             nir_intrinsic_instr *instr,
             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   unsigned num_components = instr->def.num_components;
   nir_variable *var = nir_deref_instr_get_variable(deref);
   unsigned const_index;
   LLVMValueRef indir_index;
   LLVMValueRef offsets[2] = { NULL, NULL };
   get_deref_offset(bld_base, deref, false, NULL, NULL,
                    &const_index, &indir_index);
   bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
   bool sample = false;
   if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
      for (unsigned i = 0; i < 2; i++) {
         offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
         offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
      }
   } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
      offsets[0] = get_src(bld_base, instr->src[1]);
      offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
      sample = true;
   }
   bld_base->interp_at(bld_base, num_components, var, centroid, sample,
                       const_index, indir_index, offsets, result);
}


static void
visit_load_scratch(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr,
                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);

   bld_base->load_scratch(bld_base, instr->def.num_components,
                          instr->def.bit_size, offset, result);
}


static void
visit_store_scratch(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   int writemask = instr->const_index[2];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_scratch(bld_base, writemask, nc, bitsize, offset, val);
}

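/* Task/mesh payload accesses reuse the shared-memory load_mem/store_mem and
 * atomic_mem hooks with the payload flag set rather than a separate path.
 */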
static void
visit_payload_load(struct lp_build_nir_context *bld_base,
                   nir_intrinsic_instr *instr,
                   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
   bld_base->load_mem(bld_base, instr->def.num_components,
                      instr->def.bit_size,
                      offset_is_uniform, true, NULL, offset, result);
}

static void
visit_payload_store(struct lp_build_nir_context *bld_base,
                    nir_intrinsic_instr *instr)
{
   LLVMValueRef val = get_src(bld_base, instr->src[0]);
   LLVMValueRef offset = get_src(bld_base, instr->src[1]);
   bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
   int writemask = instr->const_index[1];
   int nc = nir_src_num_components(instr->src[0]);
   int bitsize = nir_src_bit_size(instr->src[0]);
   bld_base->store_mem(bld_base, writemask, nc, bitsize,
                       offset_is_uniform, true, NULL, offset, val);
}

static void
visit_payload_atomic(struct lp_build_nir_context *bld_base,
                     nir_intrinsic_instr *instr,
                     LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef offset = get_src(bld_base, instr->src[0]);
   LLVMValueRef val = get_src(bld_base, instr->src[1]);
   LLVMValueRef val2 = NULL;
   int bitsize = nir_src_bit_size(instr->src[1]);
   if (instr->intrinsic == nir_intrinsic_task_payload_atomic_swap)
      val2 = get_src(bld_base, instr->src[2]);

   bld_base->atomic_mem(bld_base, nir_intrinsic_atomic_op(instr), bitsize, true, NULL,
                        offset, val, val2, &result[0]);
}

static void visit_load_param(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr,
                             LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
{
   LLVMValueRef param = LLVMGetParam(bld_base->func, nir_intrinsic_param_idx(instr) + LP_RESV_FUNC_ARGS);
   struct gallivm_state *gallivm = bld_base->base.gallivm;
   if (instr->num_components == 1)
      result[0] = param;
   else {
      for (unsigned i = 0; i < instr->num_components; i++)
         result[i] = LLVMBuildExtractValue(gallivm->builder, param, i, "");
   }
}

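/* Central intrinsic dispatch: translate each NIR intrinsic via the visit_*
 * helpers above or a direct backend callback, then assign any produced
 * value to the instruction's SSA destination.
 */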
static void
visit_intrinsic(struct lp_build_nir_context *bld_base,
                nir_intrinsic_instr *instr)
{
   LLVMValueRef result[NIR_MAX_VEC_COMPONENTS] = {0};
   switch (instr->intrinsic) {
   case nir_intrinsic_decl_reg:
      /* already handled */
      break;
   case nir_intrinsic_load_reg:
   case nir_intrinsic_load_reg_indirect:
      visit_load_reg(bld_base, instr, result);
      break;
   case nir_intrinsic_store_reg:
   case nir_intrinsic_store_reg_indirect:
      visit_store_reg(bld_base, instr);
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_primitive_input:
      visit_load_input(bld_base, instr, result);
      break;
   case nir_intrinsic_store_output:
      visit_store_output(bld_base, instr);
      break;
   case nir_intrinsic_load_deref:
      visit_load_var(bld_base, instr, result);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(bld_base, instr);
      break;
   case nir_intrinsic_load_ubo:
      visit_load_ubo(bld_base, instr, result);
      break;
   case nir_intrinsic_load_push_constant:
      visit_load_push_constant(bld_base, instr, result);
      break;
   case nir_intrinsic_load_ssbo:
      visit_load_ssbo(bld_base, instr, result);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(bld_base, instr);
      break;
   case nir_intrinsic_get_ssbo_size:
      visit_get_ssbo_size(bld_base, instr, result);
      break;
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_workgroup_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_num_workgroups:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_workgroup_size:
   case nir_intrinsic_load_work_dim:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_view_index:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_subgroup_id:
   case nir_intrinsic_load_num_subgroups:
      bld_base->sysval_intrin(bld_base, instr, result);
      break;
   case nir_intrinsic_load_helper_invocation:
      bld_base->helper_invocation(bld_base, &result[0]);
      break;
   case nir_intrinsic_terminate_if:
   case nir_intrinsic_terminate:
      visit_discard(bld_base, instr);
      break;
   case nir_intrinsic_emit_vertex:
      bld_base->emit_vertex(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_end_primitive:
      bld_base->end_primitive(bld_base, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap:
      visit_ssbo_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_image_load:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_sparse_load:
      visit_load_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_store:
   case nir_intrinsic_bindless_image_store:
      visit_store_image(bld_base, instr);
      break;
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap:
   case nir_intrinsic_bindless_image_atomic:
   case nir_intrinsic_bindless_image_atomic_swap:
      visit_atomic_image(bld_base, instr, result);
      break;
   case nir_intrinsic_image_size:
   case nir_intrinsic_bindless_image_size:
      visit_image_size(bld_base, instr, result);
      break;
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_samples:
      visit_image_samples(bld_base, instr, result);
      break;
   case nir_intrinsic_load_shared:
      visit_shared_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_shared:
      visit_shared_store(bld_base, instr);
      break;
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_shared_atomic_swap:
      visit_shared_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_barrier:
      visit_barrier(bld_base, instr);
      break;
   case nir_intrinsic_load_kernel_input:
      visit_load_kernel_input(bld_base, instr, result);
      break;
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
      visit_load_global(bld_base, instr, result);
      break;
   case nir_intrinsic_store_global:
      visit_store_global(bld_base, instr);
      break;
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_swap:
      visit_global_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_vote_feq:
      bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
      break;
   case nir_intrinsic_elect:
      bld_base->elect(bld_base, result);
      break;
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan:
      bld_base->reduce(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, nir_src_bit_size(instr->src[0])), instr, result);
      break;
   case nir_intrinsic_ballot:
      bld_base->ballot(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
      break;
#if LLVM_VERSION_MAJOR >= 10
   case nir_intrinsic_shuffle:
      visit_shuffle(bld_base, instr, result);
      break;
#endif
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation: {
      LLVMValueRef src0 = get_src(bld_base, instr->src[0]);
      src0 = cast_type(bld_base, src0, nir_type_int, nir_src_bit_size(instr->src[0]));

      LLVMValueRef src1 = NULL;
      if (instr->intrinsic == nir_intrinsic_read_invocation)
         src1 = cast_type(bld_base, get_src(bld_base, instr->src[1]), nir_type_int, 32);

      bld_base->read_invocation(bld_base, src0, nir_src_bit_size(instr->src[0]), src1, result);
      break;
   }
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
      visit_interp(bld_base, instr, result);
      break;
   case nir_intrinsic_load_scratch:
      visit_load_scratch(bld_base, instr, result);
      break;
   case nir_intrinsic_store_scratch:
      visit_store_scratch(bld_base, instr);
      break;
   case nir_intrinsic_shader_clock:
      bld_base->clock(bld_base, result);
      break;
   case nir_intrinsic_launch_mesh_workgroups:
      bld_base->launch_mesh_workgroups(bld_base,
                                       get_src(bld_base, instr->src[0]));
      break;
   case nir_intrinsic_load_task_payload:
      visit_payload_load(bld_base, instr, result);
      break;
   case nir_intrinsic_store_task_payload:
      visit_payload_store(bld_base, instr);
      break;
   case nir_intrinsic_task_payload_atomic:
   case nir_intrinsic_task_payload_atomic_swap:
      visit_payload_atomic(bld_base, instr, result);
      break;
   case nir_intrinsic_set_vertex_and_primitive_count:
      bld_base->set_vertex_and_primitive_count(bld_base,
                                               get_src(bld_base, instr->src[0]),
                                               get_src(bld_base, instr->src[1]));
      break;
   case nir_intrinsic_load_param:
      visit_load_param(bld_base, instr, result);
      break;
   case nir_intrinsic_ddx:
   case nir_intrinsic_ddy:
   case nir_intrinsic_ddx_coarse:
   case nir_intrinsic_ddy_coarse:
   case nir_intrinsic_ddx_fine:
   case nir_intrinsic_ddy_fine: {
      LLVMValueRef src = get_src(bld_base, instr->src[0]);
      src = cast_type(bld_base, src, nir_type_float, nir_src_bit_size(instr->src[0]));

      struct lp_build_context *bld = get_flt_bld(bld_base, nir_src_bit_size(instr->src[0]));

      if (instr->intrinsic == nir_intrinsic_ddx ||
          instr->intrinsic == nir_intrinsic_ddx_coarse ||
          instr->intrinsic == nir_intrinsic_ddx_fine)
         result[0] = lp_build_ddx(bld, src);
      else
         result[0] = lp_build_ddy(bld, src);

      break;
   }
   default:
      fprintf(stderr, "Unsupported intrinsic: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      assert(0);
      break;
   }
   if (result[0]) {
      assign_ssa_dest(bld_base, &instr->def, result);
   }
}


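/* Size/level/sample-count queries (txs, query_levels, texture_samples) all
 * map onto the backend's tex_size hook; query_levels takes its result from
 * component 3 of the returned size vector.
 */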
static void
visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   struct lp_sampler_size_query_params params = { 0 };
   LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
   LLVMValueRef explicit_lod = NULL;
   LLVMValueRef texture_unit_offset = NULL;
   LLVMValueRef resource = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_lod:
         explicit_lod = cast_type(bld_base,
                                  get_src(bld_base, instr->src[i].src),
                                  nir_type_int, 32);
         break;
      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_texture_handle:
         resource = get_src(bld_base, instr->src[i].src);
         break;
      default:
         break;
      }
   }

   params.target = glsl_sampler_to_pipe(instr->sampler_dim, instr->is_array);
   params.texture_unit = instr->texture_index;
   params.explicit_lod = explicit_lod;
   params.is_sviewinfo = true;
   params.sizes_out = sizes_out;
   params.samples_only = (instr->op == nir_texop_texture_samples);
   params.texture_unit_offset = texture_unit_offset;
   params.ms = instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
               instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS;

   if (instr->op == nir_texop_query_levels)
      params.explicit_lod = bld_base->uint_bld.zero;

   params.resource = resource;

   bld_base->tex_size(bld_base, &params);
   assign_ssa_dest(bld_base, &instr->def,
                   &sizes_out[instr->op == nir_texop_query_levels ? 3 : 0]);
}


static enum lp_sampler_lod_property
lp_build_nir_lod_property(gl_shader_stage stage, nir_src lod_src)
{
   enum lp_sampler_lod_property lod_property;

   if (nir_src_is_always_uniform(lod_src)) {
      lod_property = LP_SAMPLER_LOD_SCALAR;
   } else if (stage == MESA_SHADER_FRAGMENT) {
      if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
      else
         lod_property = LP_SAMPLER_LOD_PER_QUAD;
   } else {
      lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }
   return lod_property;
}


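/* Encode a texture instruction into an lp_sampler sample key: op type
 * (fetch/gather/LOD query), shadow compare, offsets, multisample fetch, LOD
 * control and property, and sparse residency. For instance, a txf with an
 * always-uniform explicit LOD combines to
 *   (LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT) |
 *   (LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT) |
 *   (LP_SAMPLER_LOD_SCALAR << LP_SAMPLER_LOD_PROPERTY_SHIFT)
 */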
uint32_t
lp_build_nir_sample_key(gl_shader_stage stage, nir_tex_instr *instr)
{
   uint32_t sample_key = 0;

   if (instr->op == nir_texop_txf ||
       instr->op == nir_texop_txf_ms) {
      sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
   } else if (instr->op == nir_texop_tg4) {
      sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
      sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
   } else if (instr->op == nir_texop_lod) {
      sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
   }

   bool explicit_lod = false;
   uint32_t lod_src = 0;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparator:
         sample_key |= LP_SAMPLER_SHADOW;
         break;
      case nir_tex_src_bias:
         sample_key |= LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT;
         explicit_lod = true;
         lod_src = i;
         break;
      case nir_tex_src_lod:
         sample_key |= LP_SAMPLER_LOD_EXPLICIT << LP_SAMPLER_LOD_CONTROL_SHIFT;
         explicit_lod = true;
         lod_src = i;
         break;
      case nir_tex_src_offset:
         sample_key |= LP_SAMPLER_OFFSETS;
         break;
      case nir_tex_src_ms_index:
         sample_key |= LP_SAMPLER_FETCH_MS;
         break;
      default:
         break;
      }
   }

   enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
   if (explicit_lod)
      lod_property = lp_build_nir_lod_property(stage, instr->src[lod_src].src);

   if (instr->op == nir_texop_txd) {
      sample_key |= LP_SAMPLER_LOD_DERIVATIVES << LP_SAMPLER_LOD_CONTROL_SHIFT;

      if (stage == MESA_SHADER_FRAGMENT) {
         if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)
            lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
         else
            lod_property = LP_SAMPLER_LOD_PER_QUAD;
      } else
         lod_property = LP_SAMPLER_LOD_PER_ELEMENT;
   }

   sample_key |= lod_property << LP_SAMPLER_LOD_PROPERTY_SHIFT;

   if (instr->is_sparse)
      sample_key |= LP_SAMPLER_RESIDENCY;

   return sample_key;
}


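/* Translate a sampling/fetch texture instruction: collect coordinates,
 * derivatives, offsets, LOD and resource sources into lp_sampler_params,
 * run the backend tex hook, and narrow the result when the destination is
 * 16-bit.
 */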
static void
visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
{
   if (instr->op == nir_texop_txs ||
       instr->op == nir_texop_query_levels ||
       instr->op == nir_texop_texture_samples) {
      visit_txs(bld_base, instr);
      return;
   }

   struct gallivm_state *gallivm = bld_base->base.gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef explicit_lod = NULL, ms_index = NULL;
   struct lp_sampler_params params = { 0 };
   struct lp_derivatives derivs;
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   LLVMValueRef texture_unit_offset = NULL;
   LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
   LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.vec_type);
   unsigned coord_vals = is_aos(bld_base) ? 1 : instr->coord_components;

   LLVMValueRef texture_resource = NULL;
   LLVMValueRef sampler_resource = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(bld_base, instr->src[i].src);
         if (coord_vals == 1) {
            coords[0] = coord;
         } else {
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
               coords[chan] = LLVMBuildExtractValue(builder, coord,
                                                    chan, "");
         }
         for (unsigned chan = coord_vals; chan < 5; chan++) {
            coords[chan] = coord_undef;
         }
         break;
      }
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_comparator:
         coords[4] = get_src(bld_base, instr->src[i].src);
         coords[4] = cast_type(bld_base, coords[4], nir_type_float, 32);
         break;
      case nir_tex_src_bias:
         explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_lod:
         if (instr->op == nir_texop_txf)
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         else
            explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_float, 32);
         break;
      case nir_tex_src_ddx: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddx[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddx[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddx[chan] = cast_type(bld_base, derivs.ddx[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_ddy: {
         int deriv_cnt = instr->coord_components;
         if (instr->is_array)
            deriv_cnt--;
         LLVMValueRef deriv_val = get_src(bld_base, instr->src[i].src);
         if (deriv_cnt == 1)
            derivs.ddy[0] = deriv_val;
         else
            for (unsigned chan = 0; chan < deriv_cnt; ++chan)
               derivs.ddy[chan] = LLVMBuildExtractValue(builder, deriv_val,
                                                        chan, "");
         for (unsigned chan = 0; chan < deriv_cnt; ++chan)
            derivs.ddy[chan] = cast_type(bld_base, derivs.ddy[chan], nir_type_float, 32);
         break;
      }
      case nir_tex_src_offset: {
         int offset_cnt = instr->coord_components;
         if (instr->is_array)
            offset_cnt--;
         LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
         if (offset_cnt == 1)
            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
         else {
            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
               offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                     chan, "");
               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
            }
         }
         break;
      }
      case nir_tex_src_ms_index:
         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
         break;

      case nir_tex_src_texture_offset:
         texture_unit_offset = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_offset:
         break;
      case nir_tex_src_texture_handle:
         texture_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_sampler_handle:
         sampler_resource = get_src(bld_base, instr->src[i].src);
         break;
      case nir_tex_src_plane:
         assert(nir_src_is_const(instr->src[i].src) && !nir_src_as_uint(instr->src[i].src));
         break;
      default:
         assert(0);
         break;
      }
   }
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   if (!sampler_resource)
      sampler_resource = texture_resource;

   switch (instr->op) {
   case nir_texop_tex:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_lod:
      for (unsigned chan = 0; chan < coord_vals; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_float, 32);
      break;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      for (unsigned chan = 0; chan < instr->coord_components; ++chan)
         coords[chan] = cast_type(bld_base, coords[chan], nir_type_int, 32);
      break;
   default:
      break;
   }

   if (instr->is_array && instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
      /* move layer coord for 1d arrays. */
      coords[2] = coords[1];
      coords[1] = coord_undef;
   }

   uint32_t samp_base_index = 0, tex_base_index = 0;
   if (!sampler_deref_instr) {
      int samp_src_index = nir_tex_instr_src_index(instr, nir_tex_src_sampler_handle);
      if (samp_src_index == -1) {
         samp_base_index = instr->sampler_index;
      }
   }
   if (!texture_deref_instr) {
      int tex_src_index = nir_tex_instr_src_index(instr, nir_tex_src_texture_handle);
      if (tex_src_index == -1) {
         tex_base_index = instr->texture_index;
      }
   }

   if (instr->op == nir_texop_txd)
      params.derivs = &derivs;

   params.sample_key = lp_build_nir_sample_key(bld_base->shader->info.stage, instr);
   params.offsets = offsets;
   params.texture_index = tex_base_index;
   params.texture_index_offset = texture_unit_offset;
   params.sampler_index = samp_base_index;
   params.coords = coords;
   params.texel = texel;
   params.lod = explicit_lod;
   params.ms_index = ms_index;
   params.aniso_filter_table = bld_base->aniso_filter_table;
   params.texture_resource = texture_resource;
   params.sampler_resource = sampler_resource;
   bld_base->tex(bld_base, &params);

   if (instr->def.bit_size != 32) {
      assert(instr->def.bit_size == 16);
      LLVMTypeRef vec_type = NULL;
      bool is_float = false;
      switch (nir_alu_type_get_base_type(instr->dest_type)) {
      case nir_type_float:
         is_float = true;
         break;
      case nir_type_int:
         vec_type = bld_base->int16_bld.vec_type;
         break;
      case nir_type_uint:
         vec_type = bld_base->uint16_bld.vec_type;
         break;
      default:
         unreachable("unexpected alu type");
      }
      for (int i = 0; i < instr->def.num_components; ++i) {
         if (is_float) {
            texel[i] = lp_build_float_to_half(gallivm, texel[i]);
         } else {
            texel[i] = LLVMBuildBitCast(builder, texel[i], bld_base->int_bld.vec_type, "");
            texel[i] = LLVMBuildTrunc(builder, texel[i], vec_type, "");
         }
      }
   }

   assign_ssa_dest(bld_base, &instr->def, texel);
}


static void
visit_ssa_undef(struct lp_build_nir_context *bld_base,
                const nir_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMValueRef undef[NIR_MAX_VEC_COMPONENTS];
   struct lp_build_context *undef_bld = get_int_bld(bld_base, true,
                                                    instr->def.bit_size);
   for (unsigned i = 0; i < num_components; i++)
      undef[i] = LLVMGetUndef(undef_bld->vec_type);
   /* memset takes a byte count, so scale by the element size to zero all
    * remaining (unused) components.
    */
   memset(&undef[num_components], 0,
          (NIR_MAX_VEC_COMPONENTS - num_components) * sizeof(LLVMValueRef));
   assign_ssa_dest(bld_base, &instr->def, undef);
}


static void
visit_jump(struct lp_build_nir_context *bld_base,
           const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld_base->break_stmt(bld_base);
      break;
   case nir_jump_continue:
      bld_base->continue_stmt(bld_base);
      break;
   default:
      unreachable("Unknown jump instr\n");
   }
}


static void
visit_deref(struct lp_build_nir_context *bld_base,
            nir_deref_instr *instr)
{
   if (!nir_deref_mode_is_one_of(instr, nir_var_mem_shared |
                                        nir_var_mem_global)) {
      return;
   }

   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry =
         _mesa_hash_table_search(bld_base->vars, instr->var);
      result = entry->data;
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   assign_ssa(bld_base, instr->def.index, result);
}

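/* Call a NIR function through the backend's call hook. The first
 * LP_RESV_FUNC_ARGS argument slots are reserved for context values the
 * backend fills in; the NIR parameters follow them.
 */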
static void
visit_call(struct lp_build_nir_context *bld_base,
           nir_call_instr *instr)
{
   LLVMValueRef *args;
   struct hash_entry *entry = _mesa_hash_table_search(bld_base->fns, instr->callee);
   struct lp_build_fn *fn = entry->data;
   args = calloc(instr->num_params + LP_RESV_FUNC_ARGS, sizeof(LLVMValueRef));

   assert(args);

   args[0] = 0;
   for (unsigned i = 0; i < instr->num_params; i++) {
      LLVMValueRef arg = get_src(bld_base, instr->params[i]);

      if (nir_src_bit_size(instr->params[i]) == 32 && LLVMTypeOf(arg) == bld_base->base.vec_type)
         arg = cast_type(bld_base, arg, nir_type_int, 32);
      args[i + LP_RESV_FUNC_ARGS] = arg;
   }

   bld_base->call(bld_base, fn, instr->num_params + LP_RESV_FUNC_ARGS, args);
   free(args);
}

static void
visit_block(struct lp_build_nir_context *bld_base, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(bld_base, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(bld_base, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(bld_base, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(bld_base, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         /* phis were removed by nir_convert_from_ssa in the prepasses. */
         assert(0);
         break;
      case nir_instr_type_undef:
         visit_ssa_undef(bld_base, nir_instr_as_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(bld_base, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(bld_base, nir_instr_as_deref(instr));
         break;
      case nir_instr_type_call:
         visit_call(bld_base, nir_instr_as_call(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }
}

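/* Flattening heuristic: an empty branch flattens trivially, and a single
 * block shorter than eight instructions is cheaper to execute
 * unconditionally under the exec mask than to branch around.
 */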
static bool
lp_should_flatten_cf_list(struct exec_list *cf_list)
{
   if (exec_list_is_empty(cf_list))
      return true;
   if (!exec_list_is_singular(cf_list))
      return false;

   struct exec_node *head = exec_list_get_head(cf_list);
   nir_block *block = nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
   return exec_list_length(&block->instr_list) < 8;
}

static void
visit_if(struct lp_build_nir_context *bld_base, nir_if *if_stmt)
{
   LLVMValueRef cond = get_src(bld_base, if_stmt->condition);

   bool flatten_then = lp_should_flatten_cf_list(&if_stmt->then_list);

   bld_base->if_cond(bld_base, cond, flatten_then);
   visit_cf_list(bld_base, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      bool flatten_else = lp_should_flatten_cf_list(&if_stmt->else_list);
      bld_base->else_stmt(bld_base, flatten_then, flatten_else);
      visit_cf_list(bld_base, &if_stmt->else_list);
      bld_base->endif_stmt(bld_base, flatten_else);
   } else {
      bld_base->endif_stmt(bld_base, flatten_then);
   }
}


static void
visit_loop(struct lp_build_nir_context *bld_base, nir_loop *loop)
{
   assert(!nir_loop_has_continue_construct(loop));
   bld_base->bgnloop(bld_base);
   visit_cf_list(bld_base, &loop->body);
   bld_base->endloop(bld_base);
}


static void
visit_cf_list(struct lp_build_nir_context *bld_base,
              struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(bld_base, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         visit_if(bld_base, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         visit_loop(bld_base, nir_cf_node_as_loop(node));
         break;
      default:
         assert(0);
      }
   }
}


static void
handle_shader_output_decl(struct lp_build_nir_context *bld_base,
                          struct nir_shader *nir,
                          struct nir_variable *variable)
{
   bld_base->emit_var_decl(bld_base, variable);
}


/* Vector registers are stored as arrays on the LLVM side so we can use GEP
 * on them; exec-mask stores need to operate on single components. The
 * arrays are laid out as:
 *   0.x, 1.x, 2.x, 3.x
 *   0.y, 1.y, 2.y, 3.y
 *   ...
 */
static LLVMTypeRef
get_register_type(struct lp_build_nir_context *bld_base,
                  nir_intrinsic_instr *reg)
{
   if (is_aos(bld_base))
      return bld_base->base.int_vec_type;

   unsigned num_array_elems = nir_intrinsic_num_array_elems(reg);
   unsigned bit_size = nir_intrinsic_bit_size(reg);
   unsigned num_components = nir_intrinsic_num_components(reg);

   struct lp_build_context *int_bld =
      get_int_bld(bld_base, true, bit_size == 1 ? 32 : bit_size);

   LLVMTypeRef type = int_bld->vec_type;
   if (num_components > 1)
      type = LLVMArrayType(type, num_components);
   if (num_array_elems)
      type = LLVMArrayType(type, num_array_elems);

   return type;
}

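/* Lowering every shader must run before lp_build_nir_llvm(): leave SSA form
 * (the walker below asserts on phis), turn locals into registers, and drop
 * dead derefs and variables.
 */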
void
lp_build_nir_prepasses(struct nir_shader *nir)
{
   NIR_PASS_V(nir, nir_convert_to_lcssa, true, true);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);
   NIR_PASS_V(nir, nir_lower_locals_to_regs, 32);
   NIR_PASS_V(nir, nir_remove_dead_derefs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
}

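/* Main entry point: declare outputs, allocate LLVM storage for NIR
 * registers and the SSA-def table, then walk the implementation body and
 * emit IR through the bld_base callbacks.
 */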
bool lp_build_nir_llvm(struct lp_build_nir_context *bld_base,
                       struct nir_shader *nir,
                       nir_function_impl *impl)
{
   nir_foreach_shader_out_variable(variable, nir)
      handle_shader_output_decl(bld_base, nir, variable);

   if (nir->info.io_lowered) {
      uint64_t outputs_written = nir->info.outputs_written;

      while (outputs_written) {
         unsigned location = u_bit_scan64(&outputs_written);
         nir_variable var = {0};

         var.type = glsl_vec4_type();
         var.data.mode = nir_var_shader_out;
         var.data.location = location;
         var.data.driver_location = util_bitcount64(nir->info.outputs_written &
                                                    BITFIELD64_MASK(location));
         bld_base->emit_var_decl(bld_base, &var);
      }
   }

   bld_base->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);
   bld_base->range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_foreach_reg_decl(reg, impl) {
      LLVMTypeRef type = get_register_type(bld_base, reg);
      LLVMValueRef reg_alloc = lp_build_alloca(bld_base->base.gallivm,
                                               type, "reg");
      _mesa_hash_table_insert(bld_base->regs, reg, reg_alloc);
   }
   nir_index_ssa_defs(impl);
   bld_base->ssa_defs = calloc(impl->ssa_alloc, sizeof(LLVMValueRef));
   visit_cf_list(bld_base, &impl->body);

   free(bld_base->ssa_defs);
   ralloc_free(bld_base->vars);
   ralloc_free(bld_base->regs);
   ralloc_free(bld_base->range_ht);
   return true;
}


/* Run some basic NIR optimizations and lowerings up front to remove
 * constructs the LLVM paths don't want to see.
 */
void
lp_build_opt_nir(struct nir_shader *nir)
{
   bool progress;

   static const struct nir_lower_tex_options lower_tex_options = {
      .lower_tg4_offsets = true,
      .lower_txp = ~0u,
      .lower_invalid_implicit_lod = true,
   };
   NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
   NIR_PASS_V(nir, nir_lower_frexp);

   if (nir->info.stage == MESA_SHADER_TASK) {
      nir_lower_task_shader_options ts_opts = { 0 };
      NIR_PASS_V(nir, nir_lower_task_shader, ts_opts);
   }

   NIR_PASS_V(nir, nir_lower_flrp, 16|32|64, true);
   NIR_PASS_V(nir, nir_lower_fp16_casts, nir_lower_fp16_all | nir_lower_fp16_split_fp64);
   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_lower_pack);

      nir_lower_tex_options options = { .lower_invalid_implicit_lod = true, };
      NIR_PASS_V(nir, nir_lower_tex, &options);

      const nir_lower_subgroups_options subgroups_options = {
         .subgroup_size = lp_native_vector_width / 32,
         .ballot_bit_size = 32,
         .ballot_components = 1,
         .lower_to_scalar = true,
         .lower_subgroup_masks = true,
         .lower_relative_shuffle = true,
         .lower_inverse_ballot = true,
      };
      NIR_PASS(progress, nir, nir_lower_subgroups, &subgroups_options);
   } while (progress);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_opt_algebraic_late);
      if (progress) {
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }
   } while (progress);

   if (nir_lower_bool_to_int32(nir)) {
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
   }
}