xref: /aosp_15_r20/external/mesa3d/src/compiler/nir/nir_builtin_builder.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2018 Red Hat Inc.
3  * Copyright © 2015 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include <math.h>
26 
27 #include "nir.h"
28 #include "nir_builder.h"
29 #include "nir_builtin_builder.h"
30 
31 nir_def *
nir_cross3(nir_builder * b,nir_def * x,nir_def * y)32 nir_cross3(nir_builder *b, nir_def *x, nir_def *y)
33 {
34    unsigned yzx[3] = { 1, 2, 0 };
35    unsigned zxy[3] = { 2, 0, 1 };
36 
37    return nir_ffma(b, nir_swizzle(b, x, yzx, 3),
38                    nir_swizzle(b, y, zxy, 3),
39                    nir_fneg(b, nir_fmul(b, nir_swizzle(b, x, zxy, 3),
40                                         nir_swizzle(b, y, yzx, 3))));
41 }
42 
43 nir_def *
nir_cross4(nir_builder * b,nir_def * x,nir_def * y)44 nir_cross4(nir_builder *b, nir_def *x, nir_def *y)
45 {
46    nir_def *cross = nir_cross3(b, x, y);
47 
48    return nir_vec4(b,
49                    nir_channel(b, cross, 0),
50                    nir_channel(b, cross, 1),
51                    nir_channel(b, cross, 2),
52                    nir_imm_intN_t(b, 0, cross->bit_size));
53 }
54 
55 nir_def *
nir_fast_length(nir_builder * b,nir_def * vec)56 nir_fast_length(nir_builder *b, nir_def *vec)
57 {
58    return nir_fsqrt(b, nir_fdot(b, vec, vec));
59 }
60 
nir_def *
nir_nextafter(nir_builder *b, nir_def *x, nir_def *y)
{
   /* Returns the next representable floating-point value after x in the
    * direction of y.  For same-width IEEE encodings this is +/-1 applied to
    * the raw integer bits of x, with +/-0.0, x == y and NaN special-cased.
    */
   nir_def *zero = nir_imm_intN_t(b, 0, x->bit_size);
   nir_def *one = nir_imm_intN_t(b, 1, x->bit_size);

   /* x == y: return x itself (selected at the end). */
   nir_def *condeq = nir_feq(b, x, y);
   /* Direction of travel: true when stepping towards y > x. */
   nir_def *conddir = nir_flt(b, x, y);
   /* +/-0.0 needs special handling: the integer encodings of +0 and -0 are
    * not adjacent, so +/-1 on the bits would not work there. */
   nir_def *condzero = nir_feq(b, x, zero);

   uint64_t sign_mask = 1ull << (x->bit_size - 1);
   /* Smallest step away from zero: one ulp (a denorm), unless denorms are
    * flushed, in which case it becomes the smallest normal (set below). */
   uint64_t min_abs = 1;

   if (nir_is_denorm_flush_to_zero(b->shader->info.float_controls_execution_mode, x->bit_size)) {
      /* Encoding of the smallest normal: exponent = 1, mantissa = 0, i.e.
       * 1 shifted by the mantissa width of the format. */
      switch (x->bit_size) {
      case 16:
         min_abs = 1 << 10;
         break;
      case 32:
         min_abs = 1 << 23;
         break;
      case 64:
         min_abs = 1ULL << 52;
         break;
      }

      /* Flush denorm to zero to avoid returning a denorm when condeq is true. */
      x = nir_fmul_imm(b, x, 1.0);
   }

   /* Step towards -inf.  beware of: +/-0.0 - 1 == NaN */
   nir_def *xn =
      nir_bcsel(b,
                condzero,
                nir_imm_intN_t(b, sign_mask | min_abs, x->bit_size),
                nir_isub(b, x, one));

   /* Step towards +inf.  beware of -0.0 + 1 == -0x1p-149 */
   nir_def *xp = nir_bcsel(b, condzero,
                           nir_imm_intN_t(b, min_abs, x->bit_size),
                           nir_iadd(b, x, one));

   /* nextafter can be implemented by just +/- 1 on the int value; for
    * negative x the integer encoding runs backwards, hence the xor with
    * the sign of x to pick the right step. */
   nir_def *res =
      nir_bcsel(b, nir_ixor(b, conddir, nir_flt(b, x, zero)), xp, xn);

   /* x == y returns x; NaN in either input propagates via nir_nan_check2. */
   return nir_nan_check2(b, x, y, nir_bcsel(b, condeq, x, res));
}
109 
nir_def *
nir_normalize(nir_builder *b, nir_def *vec)
{
   /* Returns vec scaled to unit length.  A scalar input degenerates to its
    * sign; zero and infinite inputs are special-cased below. */
   if (vec->num_components == 1)
      return nir_fsign(b, vec);

   nir_def *f0 = nir_imm_floatN_t(b, 0.0, vec->bit_size);
   nir_def *f1 = nir_imm_floatN_t(b, 1.0, vec->bit_size);
   nir_def *finf = nir_imm_floatN_t(b, INFINITY, vec->bit_size);

   /* scale the input to increase precision */
   nir_def *maxc = nir_fmax_abs_vec_comp(b, vec);
   nir_def *svec = nir_fdiv(b, vec, maxc);
   /* for inf: a finite substitute vector, since svec would contain
    * inf/inf = NaN in that case. */
   nir_def *finfvec = nir_copysign(b, nir_bcsel(b, nir_feq(b, vec, finf), f1, f0), f1);

   /* Use the substitute whenever the largest magnitude was infinite. */
   nir_def *temp = nir_bcsel(b, nir_feq(b, maxc, finf), finfvec, svec);
   nir_def *res = nir_fmul(b, temp, nir_frsq(b, nir_fdot(b, temp, temp)));

   /* An all-zero vector has no direction; return it unchanged to avoid
    * the 0/0 from the scaling division. */
   return nir_bcsel(b, nir_feq(b, maxc, f0), vec, res);
}
131 
132 nir_def *
nir_smoothstep(nir_builder * b,nir_def * edge0,nir_def * edge1,nir_def * x)133 nir_smoothstep(nir_builder *b, nir_def *edge0, nir_def *edge1, nir_def *x)
134 {
135    nir_def *f2 = nir_imm_floatN_t(b, 2.0, x->bit_size);
136    nir_def *f3 = nir_imm_floatN_t(b, 3.0, x->bit_size);
137 
138    /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
139    nir_def *t =
140       nir_fsat(b, nir_fdiv(b, nir_fsub(b, x, edge0),
141                            nir_fsub(b, edge1, edge0)));
142 
143    /* result = t * t * (3 - 2 * t) */
144    return nir_fmul(b, t, nir_fmul(b, t, nir_a_minus_bc(b, f3, f2, t)));
145 }
146 
147 nir_def *
nir_upsample(nir_builder * b,nir_def * hi,nir_def * lo)148 nir_upsample(nir_builder *b, nir_def *hi, nir_def *lo)
149 {
150    assert(lo->num_components == hi->num_components);
151    assert(lo->bit_size == hi->bit_size);
152 
153    nir_def *res[NIR_MAX_VEC_COMPONENTS];
154    for (unsigned i = 0; i < lo->num_components; ++i) {
155       nir_def *vec = nir_vec2(b, nir_channel(b, lo, i), nir_channel(b, hi, i));
156       res[i] = nir_pack_bits(b, vec, vec->bit_size * 2);
157    }
158 
159    return nir_vec(b, res, lo->num_components);
160 }
161 
nir_def *
nir_atan(nir_builder *b, nir_def *y_over_x)
{
   /* Approximates atan(y_over_x) by reducing the argument to |u| <= 1,
    * evaluating an odd degree-11 polynomial in |u| via Horner's method, and
    * then fixing up the range reduction and the sign of the result.
    */
   const uint32_t bit_size = y_over_x->bit_size;

   nir_def *abs_y_over_x = nir_fabs(b, y_over_x);

   /*
    * range-reduction, first step:
    *
    *      / y_over_x         if |y_over_x| <= 1.0;
    * u = <
    *      \ 1.0 / y_over_x   otherwise
    *
    * x = |u| for the corrected sign.
    */
   nir_def *le_1 = nir_fle_imm(b, abs_y_over_x, 1.0);
   nir_def *u = nir_bcsel(b, le_1, y_over_x, nir_frcp(b, y_over_x));

   /*
    * approximate atan by evaluating polynomial using Horner's method:
    *
    * x   * 0.9999793128310355 - x^3  * 0.3326756418091246 +
    * x^5 * 0.1938924977115610 - x^7  * 0.1173503194786851 +
    * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
    */
   float coeffs[] = {
      -0.0121323213173444f, 0.0536813784310406f,
      -0.1173503194786851f, 0.1938924977115610f,
      -0.3326756418091246f, 0.9999793128310355f
   };

   /* Horner evaluation in x^2: only odd powers appear, so the final
    * multiply by |u| below restores the odd polynomial. */
   nir_def *x_2 = nir_fmul(b, u, u);
   nir_def *res = nir_imm_floatN_t(b, coeffs[0], bit_size);

   for (unsigned i = 1; i < ARRAY_SIZE(coeffs); ++i) {
      res = nir_ffma_imm2(b, res, x_2, coeffs[i]);
   }

   /* range-reduction fixup value: atan(x) = pi/2 - atan(1/x) for x > 0 */
   nir_def *bias = nir_bcsel(b, le_1, nir_imm_floatN_t(b, 0, bit_size),
                             nir_imm_floatN_t(b, -M_PI_2, bit_size));

   /* multiply through by x while fixing up the range reduction */
   nir_def *tmp = nir_ffma(b, nir_fabs(b, u), res, bias);

   /* sign fixup: atan is odd, so the result carries the argument's sign */
   return nir_copysign(b, tmp, y_over_x);
}
211 
nir_def *
nir_atan2(nir_builder *b, nir_def *y, nir_def *x)
{
   /* Computes atan2(y, x) — the angle of the point (x, y) — on top of
    * nir_atan(), with explicit handling of the left half-plane, huge
    * denominators and the inf/inf and 0/0 corner cases described below.
    */
   assert(y->bit_size == x->bit_size);
   const uint32_t bit_size = x->bit_size;

   nir_def *zero = nir_imm_floatN_t(b, 0, bit_size);
   nir_def *one = nir_imm_floatN_t(b, 1, bit_size);

   /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
    * for the y=0 discontinuity to end up aligned with the vertical
    * discontinuity of atan(s/t) along t=0.  This also makes sure that we
    * don't attempt to divide by zero along the vertical line, which may give
    * unspecified results on non-GLSL 4.1-capable hardware.
    */
   nir_def *flip = nir_fge(b, zero, x);
   nir_def *s = nir_bcsel(b, flip, nir_fabs(b, x), y);
   nir_def *t = nir_bcsel(b, flip, y, nir_fabs(b, x));

   /* If the magnitude of the denominator exceeds some huge value, scale down
    * the arguments in order to prevent the reciprocal operation from flushing
    * its result to zero, which would cause precision problems, and for s
    * infinite would cause us to return a NaN instead of the correct finite
    * value.
    *
    * If fmin and fmax are respectively the smallest and largest positive
    * normalized floating point values representable by the implementation,
    * the constants below should be in agreement with:
    *
    *    huge <= 1 / fmin
    *    scale <= 1 / fmin / fmax (for |t| >= huge)
    *
    * In addition scale should be a negative power of two in order to avoid
    * loss of precision.  The values chosen below should work for most usual
    * floating point representations with at least the dynamic range of ATI's
    * 24-bit representation.
    */
   const double huge_val = bit_size >= 32 ? 1e18 : 16384;
   nir_def *scale = nir_bcsel(b, nir_fge_imm(b, nir_fabs(b, t), huge_val),
                              nir_imm_floatN_t(b, 0.25, bit_size), one);
   nir_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
   nir_def *abs_s_over_t = nir_fmul(b, nir_fabs(b, nir_fmul(b, s, scale)),
                                    nir_fabs(b, rcp_scaled_t));

   /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
    * that ∞/∞ = 1) in order to comply with the rather artificial rules
    * inherited from IEEE 754-2008, namely:
    *
    *  "atan2(±∞, −∞) is ±3π/4
    *   atan2(±∞, +∞) is ±π/4"
    *
    * Note that this is inconsistent with the rules for the neighborhood of
    * zero that are based on iterated limits:
    *
    *  "atan2(±0, −0) is ±π
    *   atan2(±0, +0) is ±0"
    *
    * but GLSL specifically allows implementations to deviate from IEEE rules
    * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
    * well).
    */
   nir_def *tan = nir_bcsel(b, nir_feq(b, nir_fabs(b, x), nir_fabs(b, y)),
                            one, abs_s_over_t);

   /* Calculate the arctangent and fix up the result if we had flipped the
    * coordinate system.
    */
   nir_def *arc =
      nir_ffma_imm1(b, nir_b2fN(b, flip, bit_size), M_PI_2, nir_atan(b, tan));

   /* Rather convoluted calculation of the sign of the result.  When x < 0 we
    * cannot use fsign because we need to be able to distinguish between
    * negative and positive zero.  We don't use bitwise arithmetic tricks for
    * consistency with the GLSL front-end.  When x >= 0 rcp_scaled_t will
    * always be non-negative so this won't be able to distinguish between
    * negative and positive zero, but we don't care because atan2 is
    * continuous along the whole positive y = 0 half-line, so it won't affect
    * the result significantly.
    */
   return nir_bcsel(b, nir_flt(b, nir_fmin(b, y, rcp_scaled_t), zero),
                    nir_fneg(b, arc), arc);
}
294 
295 nir_def *
nir_build_texture_query(nir_builder * b,nir_tex_instr * tex,nir_texop texop,unsigned components,nir_alu_type dest_type,bool include_coord,bool include_lod)296 nir_build_texture_query(nir_builder *b, nir_tex_instr *tex, nir_texop texop,
297                         unsigned components, nir_alu_type dest_type,
298                         bool include_coord, bool include_lod)
299 {
300    nir_tex_instr *query;
301 
302    unsigned num_srcs = include_lod ? 1 : 0;
303    for (unsigned i = 0; i < tex->num_srcs; i++) {
304       if ((tex->src[i].src_type == nir_tex_src_coord && include_coord) ||
305           tex->src[i].src_type == nir_tex_src_texture_deref ||
306           tex->src[i].src_type == nir_tex_src_sampler_deref ||
307           tex->src[i].src_type == nir_tex_src_texture_offset ||
308           tex->src[i].src_type == nir_tex_src_sampler_offset ||
309           tex->src[i].src_type == nir_tex_src_texture_handle ||
310           tex->src[i].src_type == nir_tex_src_sampler_handle)
311          num_srcs++;
312    }
313 
314    query = nir_tex_instr_create(b->shader, num_srcs);
315    query->op = texop;
316    query->sampler_dim = tex->sampler_dim;
317    query->is_array = tex->is_array;
318    query->is_shadow = tex->is_shadow;
319    query->is_new_style_shadow = tex->is_new_style_shadow;
320    query->texture_index = tex->texture_index;
321    query->sampler_index = tex->sampler_index;
322    query->dest_type = dest_type;
323 
324    if (include_coord) {
325       query->coord_components = tex->coord_components;
326    }
327 
328    unsigned idx = 0;
329    for (unsigned i = 0; i < tex->num_srcs; i++) {
330       if ((tex->src[i].src_type == nir_tex_src_coord && include_coord) ||
331           tex->src[i].src_type == nir_tex_src_texture_deref ||
332           tex->src[i].src_type == nir_tex_src_sampler_deref ||
333           tex->src[i].src_type == nir_tex_src_texture_offset ||
334           tex->src[i].src_type == nir_tex_src_sampler_offset ||
335           tex->src[i].src_type == nir_tex_src_texture_handle ||
336           tex->src[i].src_type == nir_tex_src_sampler_handle) {
337          query->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
338          query->src[idx].src_type = tex->src[i].src_type;
339          idx++;
340       }
341    }
342 
343    /* Add in an LOD because some back-ends require it */
344    if (include_lod) {
345       query->src[idx] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));
346    }
347 
348    nir_def_init(&query->instr, &query->def, nir_tex_instr_dest_size(query),
349                 nir_alu_type_get_type_size(dest_type));
350 
351    nir_builder_instr_insert(b, &query->instr);
352    return &query->def;
353 }
354 
355 nir_def *
nir_get_texture_size(nir_builder * b,nir_tex_instr * tex)356 nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
357 {
358    b->cursor = nir_before_instr(&tex->instr);
359 
360    return nir_build_texture_query(b, tex, nir_texop_txs,
361                                   nir_tex_instr_dest_size(tex),
362                                   nir_type_int32, false, true);
363 }
364 
365 nir_def *
nir_get_texture_lod(nir_builder * b,nir_tex_instr * tex)366 nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
367 {
368    b->cursor = nir_before_instr(&tex->instr);
369 
370    nir_def *tql = nir_build_texture_query(b, tex, nir_texop_lod, 2,
371                                           nir_type_float32, true, false);
372 
373    /* The LOD is the y component of the result */
374    return nir_channel(b, tql, 1);
375 }
376