1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper functions for type conversions.
32 *
33 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex.
 *
 * There are a few invariants to keep in mind in type conversions:
40 *
41 * - register width must remain constant:
42 *
43 * src_type.width * src_type.length == dst_type.width * dst_type.length
44 *
45 * - total number of elements must remain constant:
46 *
47 * src_type.length * num_srcs == dst_type.length * num_dsts
48 *
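 * For example, converting four 4 x float32 vectors into a single 16 x unorm8
 * vector satisfies both invariants:
 *
 *   32 * 4 == 8 * 16 == 128      (register width)
 *   4 * 4  == 16 * 1 == 16       (total number of elements)
 *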
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
57 *
58 * Make sure to run lp_test_conv unit test after any change to this file.
59 *
60 * @author Jose Fonseca <[email protected]>
61 */
62
63
64 #include "util/u_debug.h"
65 #include "util/u_math.h"
66 #include "util/half_float.h"
67 #include "util/u_cpu_detect.h"
68
69 #include "lp_bld_type.h"
70 #include "lp_bld_const.h"
71 #include "lp_bld_arit.h"
72 #include "lp_bld_bitarit.h"
73 #include "lp_bld_pack.h"
74 #include "lp_bld_conv.h"
75 #include "lp_bld_logic.h"
76 #include "lp_bld_intr.h"
77 #include "lp_bld_printf.h"
78 #include "lp_bld_format.h"
79 #include "lp_bld_limits.h"
80
81
82 /* the lp_test_format test fails on mingw/i686 at -O2 with gcc 10.x
83 * ref https://gitlab.freedesktop.org/mesa/mesa/-/issues/3906
84 */
85
86 #if defined(__MINGW32__) && !defined(__MINGW64__) && (__GNUC__ == 10)
87 #warning "disabling caller-saves optimization for this file to work around compiler bug"
88 #pragma GCC optimize("-fno-caller-saves")
89 #endif
90
91 /**
92 * Converts int16 half-float to float32
93 * Note this can be performed in 1 instruction if vcvtph2ps exists (f16c/cvt16)
94 * [llvm.x86.vcvtph2ps / _mm_cvtph_ps]
95 *
96 * @param src value to convert
97 *
98 */
LLVMValueRef
lp_build_half_to_float(struct gallivm_state *gallivm,
                       LLVMValueRef src)
102 {
103 LLVMBuilderRef builder = gallivm->builder;
104 LLVMTypeRef src_type = LLVMTypeOf(src);
105 unsigned src_length = LLVMGetTypeKind(src_type) == LLVMVectorTypeKind ?
106 LLVMGetVectorSize(src_type) : 1;
107
108 struct lp_type f32_type = lp_type_float_vec(32, 32 * src_length);
109 struct lp_type i16_type = lp_type_int_vec(16, 16 * src_length);
110 struct lp_type i32_type = lp_type_int_vec(32, 32 * src_length);
111 LLVMTypeRef int_vec_type = lp_build_vec_type(gallivm, i16_type);
112 LLVMTypeRef ext_int_vec_type = lp_build_vec_type(gallivm, i32_type);
113 LLVMValueRef h;
114
115 if (lp_has_fp16() && (src_length == 4 || src_length == 8)) {
116 if (util_get_cpu_caps()->has_f16c && LLVM_VERSION_MAJOR < 11) {
117 const char *intrinsic = NULL;
118 if (src_length == 4) {
119 src = lp_build_pad_vector(gallivm, src, 8);
120 intrinsic = "llvm.x86.vcvtph2ps.128";
121 }
122 else {
123 intrinsic = "llvm.x86.vcvtph2ps.256";
124 }
125 src = LLVMBuildBitCast(builder, src,
126 LLVMVectorType(LLVMInt16TypeInContext(gallivm->context), 8), "");
127 return lp_build_intrinsic_unary(builder, intrinsic,
128 lp_build_vec_type(gallivm, f32_type), src);
129 } else {
130 /*
131 * XXX: could probably use on other archs as well.
132 * But if the cpu doesn't support it natively it looks like the backends still
133 * can't lower it and will try to call out to external libraries, which will crash.
134 */
135 /*
136 * XXX: lp_build_vec_type() would use int16 vector. Probably need to revisit
137 * this at some point.
138 */
139 src = LLVMBuildBitCast(builder, src,
140 LLVMVectorType(LLVMHalfTypeInContext(gallivm->context), src_length), "");
141 return LLVMBuildFPExt(builder, src, lp_build_vec_type(gallivm, f32_type), "");
142 }
143 }
144
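   /*
    * Illustrative example of the fallback path below: the half-float 1.0
    * (bit pattern 0x3c00) is zero-extended to 0x00003c00 and expanded by
    * lp_build_smallfloat_to_float() (10 mantissa bits, 5 exponent bits)
    * into the float32 pattern 0x3f800000, i.e. 1.0f.
    */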
145 src = LLVMBuildBitCast(builder, src, int_vec_type, "");
146 h = LLVMBuildZExt(builder, src, ext_int_vec_type, "");
147 return lp_build_smallfloat_to_float(gallivm, f32_type, h, 10, 5, 0, true);
148 }
149
150
151 /**
152 * Converts float32 to int16 half-float
153 * Note this can be performed in 1 instruction if vcvtps2ph exists (f16c/cvt16)
154 * [llvm.x86.vcvtps2ph / _mm_cvtps_ph]
155 *
156 * @param src value to convert
157 *
158 * Convert float32 to half floats, preserving Infs and NaNs,
159 * with rounding towards zero (trunc).
160 * XXX: For GL, would prefer rounding towards nearest(-even).
161 */
LLVMValueRef
lp_build_float_to_half(struct gallivm_state *gallivm,
                       LLVMValueRef src)
165 {
166 LLVMBuilderRef builder = gallivm->builder;
167 LLVMTypeRef f32_vec_type = LLVMTypeOf(src);
168 unsigned length = LLVMGetTypeKind(f32_vec_type) == LLVMVectorTypeKind
169 ? LLVMGetVectorSize(f32_vec_type) : 1;
170 struct lp_type i32_type = lp_type_int_vec(32, 32 * length);
171 struct lp_type i16_type = lp_type_int_vec(16, 16 * length);
172 LLVMValueRef result;
173
174 /*
175 * Note: Newer llvm versions (3.6 or so) support fptrunc to 16 bits
176 * directly, without any (x86 or generic) intrinsics.
    * However, the rounding mode cannot be specified (and is undefined;
    * in practice on x86 it seems to do nearest-even, but that may depend
    * on instruction set support), so it is essentially useless here.
181 */
182
183 if (util_get_cpu_caps()->has_f16c &&
184 (length == 4 || length == 8)) {
185 struct lp_type i168_type = lp_type_int_vec(16, 16 * 8);
186 unsigned mode = 3; /* same as LP_BUILD_ROUND_TRUNCATE */
187 LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
188 const char *intrinsic = NULL;
189 if (length == 4) {
190 intrinsic = "llvm.x86.vcvtps2ph.128";
191 }
192 else {
193 intrinsic = "llvm.x86.vcvtps2ph.256";
194 }
195 result = lp_build_intrinsic_binary(builder, intrinsic,
196 lp_build_vec_type(gallivm, i168_type),
197 src, LLVMConstInt(i32t, mode, 0));
198 if (length == 4) {
199 result = lp_build_extract_range(gallivm, result, 0, 4);
200 }
201 result = LLVMBuildBitCast(builder, result, lp_build_vec_type(gallivm, lp_type_float_vec(16, 16 * length)), "");
202 }
203
204 else {
205 result = lp_build_float_to_smallfloat(gallivm, i32_type, src, 10, 5, 0, true);
206 /* Convert int32 vector to int16 vector by trunc (might generate bad code) */
207 result = LLVMBuildTrunc(builder, result, lp_build_vec_type(gallivm, i16_type), "");
208 }
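
   /*
    * Illustrative example of the fallback path above: 1.0f (0x3f800000) is
    * packed by lp_build_float_to_smallfloat() into 0x00003c00 in the i32
    * vector, and the trunc then yields the half-float bit pattern 0x3c00.
    */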
209
210 /*
211 * Debugging code.
212 */
213 if (0) {
214 LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
215 LLVMTypeRef i16t = LLVMInt16TypeInContext(gallivm->context);
216 LLVMTypeRef f32t = LLVMFloatTypeInContext(gallivm->context);
217 LLVMValueRef ref_result = LLVMGetUndef(LLVMVectorType(i16t, length));
218 unsigned i;
219
220 LLVMTypeRef func_type = LLVMFunctionType(i16t, &f32t, 1, 0);
221 LLVMValueRef func = lp_build_const_int_pointer(gallivm, func_to_pointer((func_pointer)_mesa_float_to_half));
222 func = LLVMBuildBitCast(builder, func, LLVMPointerType(func_type, 0), "_mesa_float_to_half");
223
224 for (i = 0; i < length; ++i) {
225 LLVMValueRef index = LLVMConstInt(i32t, i, 0);
226 LLVMValueRef f32 = LLVMBuildExtractElement(builder, src, index, "");
227 #if 0
228 /*
229 * XXX: not really supported by backends.
230 * Even if they would now, rounding mode cannot be specified and
231 * is undefined.
232 */
233 LLVMValueRef f16 = lp_build_intrinsic_unary(builder, "llvm.convert.to.fp16", i16t, f32);
234 #else
235 LLVMValueRef f16 = LLVMBuildCall2(builder, func_type, func, &f32, 1, "");
236 #endif
237 ref_result = LLVMBuildInsertElement(builder, ref_result, f16, index, "");
238 }
239
240 lp_build_print_value(gallivm, "src = ", src);
241 lp_build_print_value(gallivm, "llvm = ", result);
242 lp_build_print_value(gallivm, "util = ", ref_result);
243 lp_build_printf(gallivm, "\n");
244 }
245
246 return result;
247 }
248
249
250 /**
251 * Special case for converting clamped IEEE-754 floats to unsigned norms.
252 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * single-precision FP to unsigned integer conversion Intel SSE instruction.
 * Second, even if there were, since the FP's mantissa takes only a fraction
 * of the register bits, the typical scale-and-cast approach would require
 * double precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width specified
 * by dst_width, the actual result type will have the same width as the source
 * type.
262 *
263 * Ex: src = { float, float, float, float }
264 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
265 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(struct gallivm_state *gallivm,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
271 {
272 LLVMBuilderRef builder = gallivm->builder;
273 LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, src_type);
274 LLVMValueRef res;
275 unsigned mantissa;
276
277 assert(src_type.floating);
278 assert(dst_width <= src_type.width);
279 src_type.sign = false;
280
281 mantissa = lp_mantissa(src_type);
282
283 if (dst_width <= mantissa) {
284 /*
       * Apply magic coefficients that will make the desired result appear
       * in the least significant bits of the mantissa, with correct rounding.
287 *
288 * This only works if the destination width fits in the mantissa.
289 */
290
291 unsigned long long ubound;
292 unsigned long long mask;
293 double scale;
294 double bias;
295
296 ubound = (1ULL << dst_width);
297 mask = ubound - 1;
298 scale = (double)mask/ubound;
299 bias = (double)(1ULL << (mantissa - dst_width));
300
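      /*
       * Illustrative worked example: for dst_width = 8 with a float32 source
       * (23-bit mantissa), scale = 255/256 and bias = 2^15 = 32768.0.
       * For src = 0.5, src * scale + bias = 32768.498..., whose float32
       * representation carries round(0.5 * 255) = 128 in its low 8 mantissa
       * bits; the final mask with 0xff extracts exactly that value.
       */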
301 res = LLVMBuildFMul(builder, src, lp_build_const_vec(gallivm, src_type, scale), "");
302 /* instead of fadd/and could (with sse2) just use lp_build_iround */
303 res = LLVMBuildFAdd(builder, res, lp_build_const_vec(gallivm, src_type, bias), "");
304 res = LLVMBuildBitCast(builder, res, int_vec_type, "");
305 res = LLVMBuildAnd(builder, res,
306 lp_build_const_int_vec(gallivm, src_type, mask), "");
307 }
308 else if (dst_width == (mantissa + 1)) {
309 /*
310 * The destination width matches exactly what can be represented in
       * floating point (i.e., mantissa + 1 bits). Even so, correct rounding
       * still needs to be applied (only for numbers in [0.5, 1.0] would
313 * conversion using truncation after scaling be sufficient).
314 */
315 double scale;
316 struct lp_build_context uf32_bld;
317
318 lp_build_context_init(&uf32_bld, gallivm, src_type);
319 scale = (double)((1ULL << dst_width) - 1);
320
321 res = LLVMBuildFMul(builder, src,
322 lp_build_const_vec(gallivm, src_type, scale), "");
323 res = lp_build_iround(&uf32_bld, res);
324 }
325 else {
326 /*
       * The destination exceeds what can be represented in floating point.
       * So multiply by the largest power of two we can get away with, and
       * then subtract the most significant bit to rescale to normalized
       * values.
       *
       * The largest power of two factor we can get away with is
       * (1 << (src_type.width - 1)), because we need to use a signed
       * conversion. In theory it should be (1 << (src_type.width - 2)), but
       * IEEE 754 rules state INT_MIN should be returned by FPToSI, which is
       * the correct result for values near 1.0!
336 *
337 * This means we get (src_type.width - 1) correct bits for values near 0.0,
338 * and (mantissa + 1) correct bits for values near 1.0. Equally or more
339 * important, we also get exact results for 0.0 and 1.0.
340 */
341
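      /*
       * Illustrative worked example: for a float32 source and dst_width = 32,
       * n = 31, so 1.0 is scaled to 2^31 and converted (via the unsigned
       * path) to 0x80000000. The left shift by 1 overflows it to 0, the
       * right shift by 31 yields 1, and the final subtraction gives
       * 0 - 1 == 0xffffffff, the expected maximum unorm32 value.
       */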
342 unsigned n = MIN2(src_type.width - 1u, dst_width);
343
344 double scale = (double)(1ULL << n);
345 unsigned lshift = dst_width - n;
346 unsigned rshift = n;
347 LLVMValueRef lshifted;
348 LLVMValueRef rshifted;
349
350 res = LLVMBuildFMul(builder, src,
351 lp_build_const_vec(gallivm, src_type, scale), "");
352 if (!src_type.sign && src_type.width == 32)
353 res = LLVMBuildFPToUI(builder, res, int_vec_type, "");
354 else
355 res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
356
357 /*
358 * Align the most significant bit to its final place.
359 *
360 * This will cause 1.0 to overflow to 0, but the later adjustment will
361 * get it right.
362 */
363 if (lshift) {
364 lshifted = LLVMBuildShl(builder, res,
365 lp_build_const_int_vec(gallivm, src_type,
366 lshift), "");
367 } else {
368 lshifted = res;
369 }
370
371 /*
372 * Align the most significant bit to the right.
373 */
374 rshifted = LLVMBuildLShr(builder, res,
375 lp_build_const_int_vec(gallivm, src_type, rshift),
376 "");
377
378 /*
       * Subtract the MSB (shifted down to the LSB position), thereby
       * re-scaling from (1 << dst_width) to ((1 << dst_width) - 1).
381 */
382
383 res = LLVMBuildSub(builder, lshifted, rshifted, "");
384 }
385
386 return res;
387 }
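
/*
 * Scalar sketch (illustrative only, hence left uncompiled) of the value the
 * code above aims to produce for an input already clamped to [0, 1]; the
 * generated paths approximate this to the precision described in their
 * comments, and tie-breaking on .5 fractions may differ.
 */
#if 0
static uint32_t
clamped_float_to_unorm_ref(double x, unsigned dst_width)
{
   double max = (double)((1ULL << dst_width) - 1);
   return (uint32_t)(x * max + 0.5);   /* round to nearest, ties upward */
}
#endif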
388
389
390 /**
391 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
392 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
393 * return {float, float, float, float} with values in range [0, 1].
394 */
LLVMValueRef
lp_build_unsigned_norm_to_float(struct gallivm_state *gallivm,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
400 {
401 LLVMBuilderRef builder = gallivm->builder;
402 LLVMTypeRef vec_type = lp_build_vec_type(gallivm, dst_type);
403 LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, dst_type);
404 LLVMValueRef bias_;
405 LLVMValueRef res;
406 unsigned mantissa;
407 unsigned n;
408 unsigned long long ubound;
409 unsigned long long mask;
410 double scale;
411 double bias;
412
413 assert(dst_type.floating);
414
415 mantissa = lp_mantissa(dst_type);
416
417 if (src_width <= (mantissa + 1)) {
418 /*
       * The source width fits within what can be represented in floating
420 * point (i.e., mantissa + 1 bits). So do a straight multiplication
421 * followed by casting. No further rounding is necessary.
422 */
423
424 scale = 1.0/(double)((1ULL << src_width) - 1);
425 res = LLVMBuildSIToFP(builder, src, vec_type, "");
426 res = LLVMBuildFMul(builder, res,
427 lp_build_const_vec(gallivm, dst_type, scale), "");
428 return res;
429 }
430 else {
431 /*
432 * The source width exceeds what can be represented in floating
433 * point. So truncate the incoming values.
434 */
435
436 n = MIN2(mantissa, src_width);
437
438 ubound = ((unsigned long long)1 << n);
439 mask = ubound - 1;
440 scale = (double)ubound/mask;
441 bias = (double)((unsigned long long)1 << (mantissa - n));
442
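      /*
       * Illustrative worked example: for src_width = 32 and a float32
       * destination (23-bit mantissa), n = 23, so the source is shifted right
       * by 9 bits. ORing the remaining 23 bits into the mantissa of
       * bias = 1.0 (0x3f800000) gives a float in [1.0, 2.0); subtracting the
       * bias and multiplying by scale = 2^23 / (2^23 - 1) rescales the result
       * to [0.0, 1.0].
       */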
443 res = src;
444
445 if (src_width > mantissa) {
446 int shift = src_width - mantissa;
447 res = LLVMBuildLShr(builder, res,
448 lp_build_const_int_vec(gallivm, dst_type, shift), "");
449 }
450
451 bias_ = lp_build_const_vec(gallivm, dst_type, bias);
452
453 res = LLVMBuildOr(builder,
454 res,
455 LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");
456
457 res = LLVMBuildBitCast(builder, res, vec_type, "");
458
459 res = LLVMBuildFSub(builder, res, bias_, "");
460 res = LLVMBuildFMul(builder, res, lp_build_const_vec(gallivm, dst_type, scale), "");
461 }
462
463 return res;
464 }
465
466
467 /**
468 * Pick a suitable num_dsts for lp_build_conv to ensure optimal cases are used.
469 *
 * Returns the number of dsts created from the srcs.
471 */
int lp_build_conv_auto(struct gallivm_state *gallivm,
                       struct lp_type src_type,
                       struct lp_type* dst_type,
                       const LLVMValueRef *src,
                       unsigned num_srcs,
                       LLVMValueRef *dst)
478 {
479 unsigned i;
480 int num_dsts = num_srcs;
481
482 if (src_type.floating == dst_type->floating &&
483 src_type.width == dst_type->width &&
484 src_type.length == dst_type->length &&
485 src_type.fixed == dst_type->fixed &&
486 src_type.norm == dst_type->norm &&
487 src_type.sign == dst_type->sign)
488 return num_dsts;
489
490 /* Special case 4x4x32 -> 1x16x8 or 2x8x32 -> 1x16x8
491 */
492 if (src_type.norm == 0 &&
493 src_type.width == 32 &&
494 src_type.fixed == 0 &&
495
496 dst_type->floating == 0 &&
497 dst_type->fixed == 0 &&
498 dst_type->width == 8 &&
499
500 ((src_type.floating == 1 && src_type.sign == 1 && dst_type->norm == 1) ||
501 (src_type.floating == 0 && dst_type->floating == 0 &&
502 src_type.sign == dst_type->sign && dst_type->norm == 0))) {
503
504 /* Special case 4x4x32 --> 1x16x8 */
505 if (src_type.length == 4 &&
506 (util_get_cpu_caps()->has_sse2 || util_get_cpu_caps()->has_altivec))
507 {
508 num_dsts = (num_srcs + 3) / 4;
509 dst_type->length = num_srcs * 4 >= 16 ? 16 : num_srcs * 4;
510
511 lp_build_conv(gallivm, src_type, *dst_type, src, num_srcs, dst, num_dsts);
512 return num_dsts;
513 }
514
515 /* Special case 2x8x32 --> 1x16x8 */
516 if (src_type.length == 8 &&
517 util_get_cpu_caps()->has_avx)
518 {
519 num_dsts = (num_srcs + 1) / 2;
520 dst_type->length = num_srcs * 8 >= 16 ? 16 : num_srcs * 8;
521
522 lp_build_conv(gallivm, src_type, *dst_type, src, num_srcs, dst, num_dsts);
523 return num_dsts;
524 }
525 }
526
527 /* lp_build_resize does not support M:N */
528 if (src_type.width == dst_type->width) {
529 lp_build_conv(gallivm, src_type, *dst_type, src, num_srcs, dst, num_dsts);
530 } else {
531 /*
532 * If dst_width is 16 bits and src_width 32 and the dst vector size
533 * 64bit, try feeding 2 vectors at once so pack intrinsics can be used.
534 * (For AVX, this isn't needed, since we usually get 256bit src and
535 * 128bit dst vectors which works ok. If we do AVX2 pack this should
536 * be extended but need to be able to tell conversion code about pack
537 * ordering first.)
538 */
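      /*
       * For example (illustrative): two 4 x int32 sources and a 4 x int16
       * destination type match the condition below, so ratio becomes 2 and
       * each pair of sources is converted into one 8 x int16 destination.
       */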
539 unsigned ratio = 1;
540 if (src_type.width == 2 * dst_type->width &&
541 src_type.length == dst_type->length &&
542 dst_type->floating == 0 && (num_srcs % 2 == 0) &&
543 dst_type->width * dst_type->length == 64) {
544 ratio = 2;
545 num_dsts /= 2;
546 dst_type->length *= 2;
547 }
548 for (i = 0; i < num_dsts; i++) {
549 lp_build_conv(gallivm, src_type, *dst_type, &src[i*ratio], ratio, &dst[i], 1);
550 }
551 }
552
553 return num_dsts;
554 }
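
/*
 * Hypothetical usage sketch of lp_build_conv_auto() from inside a caller
 * (variable names are invented for illustration; "src" is assumed to hold
 * four 4 x float32 vectors and the host to have SSE2 or AltiVec):
 */
#if 0
   struct lp_type src_type = lp_type_float_vec(32, 128);  /* 4 x float32 */
   struct lp_type dst_type = lp_type_unorm(8, 32);        /* 4 x unorm8  */
   LLVMValueRef dst[LP_MAX_VECTOR_LENGTH];
   /* The special case above widens dst_type to 16 x unorm8 and returns 1. */
   int num_dsts = lp_build_conv_auto(gallivm, src_type, &dst_type,
                                     src, 4, dst);
#endif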
555
556
557 /**
558 * Generic type conversion.
559 *
560 * TODO: Take a precision argument, or even better, add a new precision member
561 * to the lp_type union.
562 */
void
lp_build_conv(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
569 {
570 LLVMBuilderRef builder = gallivm->builder;
571 struct lp_type tmp_type;
572 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
573 unsigned num_tmps;
574 unsigned i;
575
   /* We must not lose or gain channels, only precision. */
577 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
578
579 assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
580 assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
581 assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
582 assert(num_dsts <= LP_MAX_VECTOR_LENGTH);
583
584 tmp_type = src_type;
585 for(i = 0; i < num_srcs; ++i) {
586 assert(lp_check_value(src_type, src[i]));
587 tmp[i] = src[i];
588 }
589 num_tmps = num_srcs;
590
591
592 /*
593 * Special case 4x4x32 --> 1x16x8, 2x4x32 -> 1x8x8, 1x4x32 -> 1x4x8
594 * Only float -> s/unorm8 and (u)int32->(u)int8.
595 * XXX: This should cover all interesting backend cases for 8 bit,
596 * but should use same strategy if dst is 16 bit.
597 */
598 if (src_type.norm == 0 &&
599 src_type.width == 32 &&
600 src_type.length == 4 &&
601 src_type.fixed == 0 &&
602
603 dst_type.floating == 0 &&
604 dst_type.fixed == 0 &&
605 dst_type.width == 8 &&
606
607 ((src_type.floating == 1 && src_type.sign == 1 && dst_type.norm == 1) ||
608 (src_type.floating == 0 && dst_type.floating == 0 &&
609 src_type.sign == dst_type.sign && dst_type.norm == 0)) &&
610
611 ((dst_type.length == 16 && 4 * num_dsts == num_srcs) ||
612 (num_dsts == 1 && dst_type.length * num_srcs == 16 && num_srcs != 3)) &&
613
614 (util_get_cpu_caps()->has_sse2 || util_get_cpu_caps()->has_altivec))
615 {
616 struct lp_build_context bld;
617 struct lp_type int16_type, int32_type;
618 struct lp_type dst_type_ext = dst_type;
619 LLVMValueRef const_scale;
620 unsigned i, j;
621
622 lp_build_context_init(&bld, gallivm, src_type);
623
624 dst_type_ext.length = 16;
625 int16_type = int32_type = dst_type_ext;
626
627 int16_type.width *= 2;
628 int16_type.length /= 2;
629 int16_type.sign = 1;
630
631 int32_type.width *= 4;
632 int32_type.length /= 4;
633 int32_type.sign = 1;
634
635 const_scale = lp_build_const_vec(gallivm, src_type, lp_const_scale(dst_type));
636
637 for (i = 0; i < num_dsts; ++i, src += 4) {
638 LLVMValueRef lo, hi;
639
640 if (src_type.floating) {
641 for (j = 0; j < dst_type.length / 4; ++j) {
642 /*
643 * XXX This is not actually fully correct. The float to int
644 * conversion will produce 0x80000000 value for everything
645 * out of range and NaNs (on x86, llvm.x86.sse2.cvtps2dq).
646 * Hence, NaNs and negatives will get clamped just fine to zero
647 * (relying on clamping pack behavior) when converting to unorm,
648 * however too large values (both finite and infinite) will also
649 * end up as zero, not 255.
650 * For snorm, for now we'll keep bug compatibility with generic
651 * conversion path (meaning too large values are fine, but
652 * NaNs get converted to -128 (purely by luck, as we don't
653 * specify nan behavior for the max there) instead of 0).
654 *
655 * dEQP has GLES31 tests that expect +inf -> 255.0.
656 */
657 if (dst_type.sign) {
658 tmp[j] = lp_build_min(&bld, bld.one, src[j]);
659
660 }
661 else {
662 if (1) {
663 tmp[j] = lp_build_min_ext(&bld, bld.one, src[j],
664 GALLIVM_NAN_RETURN_NAN_FIRST_NONNAN);
                  }
                  else {
                     tmp[j] = src[j];
                  }
667 }
668 tmp[j] = LLVMBuildFMul(builder, tmp[j], const_scale, "");
669 tmp[j] = lp_build_iround(&bld, tmp[j]);
670 }
671 } else {
672 for (j = 0; j < dst_type.length / 4; ++j) {
673 if (!dst_type.sign) {
674 /*
675 * Pack clamp is always signed->unsigned (or signed->signed).
676 * Hence need min.
677 */
678 LLVMValueRef const_max;
679 const_max = lp_build_const_int_vec(gallivm, src_type, 255);
680 tmp[j] = lp_build_min(&bld, src[j], const_max);
681 } else {
682 tmp[j] = src[j];
683 }
684 }
685 }
686
687 if (num_srcs == 1) {
688 tmp[1] = tmp[0];
689 }
690
691 /* relying on clamping behavior of sse2 intrinsics here */
692 lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
693
694 if (num_srcs < 4) {
695 hi = lo;
696 }
697 else {
698 hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
699 }
700 dst[i] = lp_build_pack2(gallivm, int16_type, dst_type_ext, lo, hi);
701 }
702 if (num_srcs < 4) {
703 dst[0] = lp_build_extract_range(gallivm, dst[0], 0, dst_type.length);
704 }
705
706 return;
707 }
708
709 /* Special case 2x8x32 --> 1x16x8, 1x8x32 ->1x8x8
710 */
711 else if (src_type.norm == 0 &&
712 src_type.width == 32 &&
713 src_type.length == 8 &&
714 src_type.fixed == 0 &&
715
716 dst_type.floating == 0 &&
717 dst_type.fixed == 0 &&
718 dst_type.width == 8 &&
719
720 ((src_type.floating == 1 && src_type.sign == 1 && dst_type.norm == 1) ||
721 (src_type.floating == 0 && dst_type.floating == 0 &&
722 src_type.sign == dst_type.sign && dst_type.norm == 0)) &&
723
724 ((dst_type.length == 16 && 2 * num_dsts == num_srcs) ||
725 (num_dsts == 1 && dst_type.length * num_srcs == 8)) &&
726
727 util_get_cpu_caps()->has_avx) {
728
729 struct lp_build_context bld;
730 struct lp_type int16_type, int32_type;
731 struct lp_type dst_type_ext = dst_type;
732 LLVMValueRef const_scale;
733 unsigned i;
734
735 lp_build_context_init(&bld, gallivm, src_type);
736
737 dst_type_ext.length = 16;
738 int16_type = int32_type = dst_type_ext;
739
740 int16_type.width *= 2;
741 int16_type.length /= 2;
742 int16_type.sign = 1;
743
744 int32_type.width *= 4;
745 int32_type.length /= 4;
746 int32_type.sign = 1;
747
748 const_scale = lp_build_const_vec(gallivm, src_type, lp_const_scale(dst_type));
749
750 for (i = 0; i < num_dsts; ++i, src += 2) {
751 unsigned j;
752 for (j = 0; j < (num_srcs == 1 ? 1 : 2); j++) {
753 LLVMValueRef lo, hi, a;
754
755 a = src[j];
756 if (src_type.floating) {
757 if (dst_type.sign) {
758 a = lp_build_min(&bld, bld.one, a);
759
760 }
761 else {
762 if (1) {
763 a = lp_build_min_ext(&bld, bld.one, a,
764 GALLIVM_NAN_RETURN_NAN_FIRST_NONNAN);
765 }
766 }
767 a = LLVMBuildFMul(builder, a, const_scale, "");
768 a = lp_build_iround(&bld, a);
769 } else {
770 if (!dst_type.sign) {
771 LLVMValueRef const_max;
772 const_max = lp_build_const_int_vec(gallivm, src_type, 255);
773 a = lp_build_min(&bld, a, const_max);
774 }
775 }
776 lo = lp_build_extract_range(gallivm, a, 0, 4);
777 hi = lp_build_extract_range(gallivm, a, 4, 4);
778 /* relying on clamping behavior of sse2 intrinsics here */
779 tmp[j] = lp_build_pack2(gallivm, int32_type, int16_type, lo, hi);
780 }
781
782 if (num_srcs == 1) {
783 tmp[1] = tmp[0];
784 }
785 dst[i] = lp_build_pack2(gallivm, int16_type, dst_type_ext, tmp[0], tmp[1]);
786 }
787
788 if (num_srcs == 1) {
789 dst[0] = lp_build_extract_range(gallivm, dst[0], 0, dst_type.length);
790 }
791
792 return;
793 }
794
795 /* Special case -> 16bit half-float
796 */
797 else if (dst_type.floating && dst_type.width == 16)
798 {
799 /* Only support src as 32bit float currently */
800 assert(src_type.floating && src_type.width == 32);
801
802 for(i = 0; i < num_tmps; ++i)
803 dst[i] = lp_build_float_to_half(gallivm, tmp[i]);
804
805 return;
806 }
807
808 /* Pre convert half-floats to floats
809 */
810 else if (src_type.floating && src_type.width == 16)
811 {
812 for(i = 0; i < num_tmps; ++i)
813 tmp[i] = lp_build_half_to_float(gallivm, tmp[i]);
814
815 tmp_type.width = 32;
816 }
817
818 /*
819 * Clamp if necessary
820 */
821
822 if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
823 struct lp_build_context bld;
824 double src_min = lp_const_min(src_type);
825 double dst_min = lp_const_min(dst_type);
826 double src_max = lp_const_max(src_type);
827 double dst_max = lp_const_max(dst_type);
828 LLVMValueRef thres;
829
830 lp_build_context_init(&bld, gallivm, tmp_type);
831
832 if(src_min < dst_min) {
833 if(dst_min == 0.0)
834 thres = bld.zero;
835 else
836 thres = lp_build_const_vec(gallivm, src_type, dst_min);
837 for(i = 0; i < num_tmps; ++i)
838 tmp[i] = lp_build_max(&bld, tmp[i], thres);
839 }
840
841 if(src_max > dst_max) {
842 if(dst_max == 1.0)
843 thres = bld.one;
844 else
845 thres = lp_build_const_vec(gallivm, src_type, dst_max);
846 for(i = 0; i < num_tmps; ++i)
847 tmp[i] = lp_build_min(&bld, tmp[i], thres);
848 }
849 }
850
851 /*
852 * Scale to the narrowest range
853 */
854
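   /*
    * For example (illustrative): in this generic path a float -> unorm8
    * conversion takes the lp_build_clamped_float_to_unsigned_norm() branch
    * below, while float -> snorm8 scales by dst_scale = 127.0 and uses
    * lp_build_iround().
    */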
855 if(dst_type.floating) {
856 /* Nothing to do */
857 }
858 else if(tmp_type.floating) {
859 if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
860 for(i = 0; i < num_tmps; ++i) {
861 tmp[i] = lp_build_clamped_float_to_unsigned_norm(gallivm,
862 tmp_type,
863 dst_type.width,
864 tmp[i]);
865 }
866 tmp_type.floating = false;
867 }
868 else {
869 double dst_scale = lp_const_scale(dst_type);
870
871 if (dst_scale != 1.0) {
872 LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
873 for(i = 0; i < num_tmps; ++i)
874 tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
875 }
876
877 /*
          * These functions will use fptosi in some form, which won't work
          * with a 32-bit uint dst. Enabling the assert below causes
          * lp_test_conv failures, though.
880 */
881 if (0)
882 assert(dst_type.sign || dst_type.width < 32);
883
884 if (dst_type.sign && dst_type.norm && !dst_type.fixed) {
885 struct lp_build_context bld;
886
887 lp_build_context_init(&bld, gallivm, tmp_type);
888 for(i = 0; i < num_tmps; ++i) {
889 tmp[i] = lp_build_iround(&bld, tmp[i]);
890 }
891 tmp_type.floating = false;
892 }
893 else {
894 LLVMTypeRef tmp_vec_type;
895
896 tmp_type.floating = false;
897 tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
898 for(i = 0; i < num_tmps; ++i) {
899 #if 0
900 if(dst_type.sign)
901 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
902 else
903 tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
904 #else
905 /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
906 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
907 #endif
908 }
909 }
910 }
911 }
912 else {
913 unsigned src_shift = lp_const_shift(src_type);
914 unsigned dst_shift = lp_const_shift(dst_type);
915 unsigned src_offset = lp_const_offset(src_type);
916 unsigned dst_offset = lp_const_offset(dst_type);
917 struct lp_build_context bld;
918 lp_build_context_init(&bld, gallivm, tmp_type);
919
920 /* Compensate for different offsets */
921 /* sscaled -> unorm and similar would cause negative shift count, skip */
922 if (dst_offset > src_offset && src_type.width > dst_type.width && src_shift > 0) {
923 for (i = 0; i < num_tmps; ++i) {
924 LLVMValueRef shifted;
925
926 shifted = lp_build_shr_imm(&bld, tmp[i], src_shift - 1);
927 tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");
928 }
929 }
930
931 if(src_shift > dst_shift) {
932 for(i = 0; i < num_tmps; ++i)
933 tmp[i] = lp_build_shr_imm(&bld, tmp[i], src_shift - dst_shift);
934 }
935 }
936
937 /*
938 * Truncate or expand bit width
939 *
940 * No data conversion should happen here, although the sign bits are
941 * crucial to avoid bad clamping.
942 */
943
944 {
945 struct lp_type new_type;
946
947 new_type = tmp_type;
948 new_type.sign = dst_type.sign;
949 new_type.width = dst_type.width;
950 new_type.length = dst_type.length;
951
952 /*
953 * Note that resize when using packs can sometimes get min/max
954 * clamping for free. Should be able to exploit this...
955 */
956 lp_build_resize(gallivm, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);
957
958 tmp_type = new_type;
959 num_tmps = num_dsts;
960 }
961
962 /*
963 * Scale to the widest range
964 */
965
966 if(src_type.floating) {
967 /* Nothing to do */
968 }
969 else if(!src_type.floating && dst_type.floating) {
970 if(!src_type.fixed && !src_type.sign && src_type.norm) {
971 for(i = 0; i < num_tmps; ++i) {
972 tmp[i] = lp_build_unsigned_norm_to_float(gallivm,
973 src_type.width,
974 dst_type,
975 tmp[i]);
976 }
977 tmp_type.floating = true;
978 }
979 else {
980 double src_scale = lp_const_scale(src_type);
981 LLVMTypeRef tmp_vec_type;
982
         /* Convert to an equally sized float type for intermediate computations */
984 tmp_type.floating = true;
985 tmp_type.sign = true;
986 tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
987 for(i = 0; i < num_tmps; ++i) {
988 #if 0
989 if(dst_type.sign)
990 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
991 else
992 tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
993 #else
994 /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
995 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
996 #endif
997 }
998
999 if (src_scale != 1.0) {
1000 LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, 1.0/src_scale);
1001 for(i = 0; i < num_tmps; ++i)
1002 tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
1003 }
1004
         /* The formula above will produce values below -1.0 for the most
          * negative value, but everything seems happy with that, hence this
          * is disabled for now. */
1007 if (0 && !src_type.fixed && src_type.norm && src_type.sign) {
1008 struct lp_build_context bld;
1009
1010 lp_build_context_init(&bld, gallivm, dst_type);
1011 for(i = 0; i < num_tmps; ++i) {
1012 tmp[i] = lp_build_max(&bld, tmp[i],
1013 lp_build_const_vec(gallivm, dst_type, -1.0f));
1014 }
1015 }
1016 }
1017 }
1018 else {
1019 unsigned src_shift = lp_const_shift(src_type);
1020 unsigned dst_shift = lp_const_shift(dst_type);
1021 unsigned src_offset = lp_const_offset(src_type);
1022 unsigned dst_offset = lp_const_offset(dst_type);
1023 struct lp_build_context bld;
1024 lp_build_context_init(&bld, gallivm, tmp_type);
1025
1026 if (src_shift < dst_shift) {
1027 LLVMValueRef pre_shift[LP_MAX_VECTOR_LENGTH];
1028
1029 if (dst_shift - src_shift < dst_type.width) {
1030 for (i = 0; i < num_tmps; ++i) {
1031 pre_shift[i] = tmp[i];
1032 tmp[i] = lp_build_shl_imm(&bld, tmp[i], dst_shift - src_shift);
1033 }
1034 }
1035 else {
1036 /*
1037 * This happens for things like sscaled -> unorm conversions. Shift
1038 * counts equal to bit width cause undefined results, so hack around it.
1039 */
1040 for (i = 0; i < num_tmps; ++i) {
1041 pre_shift[i] = tmp[i];
1042 tmp[i] = lp_build_zero(gallivm, dst_type);
1043 }
1044 }
1045
1046 /* Compensate for different offsets */
1047 if (dst_offset > src_offset) {
1048 for (i = 0; i < num_tmps; ++i) {
1049 tmp[i] = LLVMBuildSub(builder, tmp[i], pre_shift[i], "");
1050 }
1051 }
1052 }
1053 }
1054
1055 for(i = 0; i < num_dsts; ++i) {
1056 dst[i] = tmp[i];
1057 assert(lp_check_value(dst_type, dst[i]));
1058 }
1059 }
1060
1061
1062 /**
1063 * Bit mask conversion.
1064 *
1065 * This will convert the integer masks that match the given types.
1066 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or one.
1068 * Any other value will likely cause unpredictable results.
1069 *
1070 * This is basically a very trimmed down version of lp_build_conv.
1071 */
void
lp_build_conv_mask(struct gallivm_state *gallivm,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
1078 {
1079
   /* We must not lose or gain channels, only precision. */
1081 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
1082
1083 /*
    * Drop the float/fixed/norm semantics and treat both types as plain
    * signed integers.
    *
    * We assume all values are 0 or -1.
1087 */
1088
1089 src_type.floating = false;
1090 src_type.fixed = false;
1091 src_type.sign = true;
1092 src_type.norm = false;
1093
1094 dst_type.floating = false;
1095 dst_type.fixed = false;
1096 dst_type.sign = true;
1097 dst_type.norm = false;
1098
1099 /*
1100 * Truncate or expand bit width
1101 */
1102
1103 lp_build_resize(gallivm, src_type, dst_type, src, num_srcs, dst, num_dsts);
1104 }
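
/*
 * Hypothetical usage sketch (identifiers invented for illustration):
 * collapsing four 8 x int32 masks into one 32 x int8 mask only needs the
 * resize step, since every lane is either 0 or -1.
 */
#if 0
   struct lp_type mask32_type = lp_type_int_vec(32, 256);  /* 8 x int32 */
   struct lp_type mask8_type = lp_type_int_vec(8, 256);    /* 32 x int8 */
   LLVMValueRef dst;
   lp_build_conv_mask(gallivm, mask32_type, mask8_type, src, 4, &dst, 1);
#endif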
1105