/*
 * SVE helper for single-precision routines which calculate exp(x) and do
 * not need special-case handling
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#ifndef PL_MATH_SV_EXPF_INLINE_H
#define PL_MATH_SV_EXPF_INLINE_H

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"

struct sv_expf_data
{
  float poly[5];
  float inv_ln2, ln2_hi, ln2_lo, shift;
};
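/* poly[4], inv_ln2, ln2_hi and ln2_lo are adjacent in memory, so expf_inline
   below can fetch all four with a single quad-word load.  */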

/* Coefficients copied from the polynomial in the AdvSIMD variant, reversed
   for compatibility with polynomial helpers. Shift is 1.5*2^17 + 127.  */
#define SV_EXPF_DATA                                                          \
  {                                                                           \
    .poly = { 0x1.ffffecp-1f, 0x1.fffdb6p-2f, 0x1.555e66p-3f, 0x1.573e2ep-5f, \
	      0x1.0e4020p-7f },                                               \
                                                                              \
    .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f,                        \
    .ln2_lo = 0x1.7f7d1cp-20f, .shift = 0x1.803f8p17f,                        \
  }

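/* Broadcast the i-th polynomial coefficient to all lanes of a vector.  */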
#define C(i) sv_f32 (d->poly[i])

static inline svfloat32_t
expf_inline (svfloat32_t x, const svbool_t pg, const struct sv_expf_data *d)
{
  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */

  /* Load some constants in quad-word chunks to minimise memory access.  */
  svfloat32_t c4_invln2_and_ln2 = svld1rq (svptrue_b32 (), &d->poly[4]);
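  /* Lanes of c4_invln2_and_ln2: 0 = poly[4], 1 = inv_ln2, 2 = ln2_hi,
     3 = ln2_lo, following the field order in sv_expf_data.  */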

  /* n = round(x/(ln2/N)).  */
  svfloat32_t z = svmla_lane (sv_f32 (d->shift), x, c4_invln2_and_ln2, 1);
  svfloat32_t n = svsub_x (pg, z, d->shift);

  /* r = x - n*ln2/N.  */
  svfloat32_t r = svmls_lane (x, n, c4_invln2_and_ln2, 2);
  r = svmls_lane (r, n, c4_invln2_and_ln2, 3);

  /* scale = 2^(n/N).  */
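  /* The shift constant (1.5*2^17 + 127) leaves round(N*x/ln2), with the
     exponent bias folded in, in the low mantissa bits of z, which is the
     encoding FEXPA expects, so svexpa produces 2^(n/N) directly.  */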
  svfloat32_t scale = svexpa (svreinterpret_u32_f32 (z));

  /* y = exp(r) - 1 ~= C0 r + C1 r^2 + C2 r^3 + C3 r^4 + C4 r^5.  */
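  /* Pairwise (Estrin-style) evaluation:
     poly = C0 r + r^2 (C1 + C2 r) + r^4 (C3 + C4 r).  */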
  svfloat32_t p12 = svmla_x (pg, C (1), C (2), r);
  svfloat32_t p34 = svmla_lane (C (3), r, c4_invln2_and_ln2, 0);
  svfloat32_t r2 = svmul_f32_x (pg, r, r);
  svfloat32_t p14 = svmla_x (pg, p12, p34, r2);
  svfloat32_t p0 = svmul_f32_x (pg, r, C (0));
  svfloat32_t poly = svmla_x (pg, p0, r2, p14);

  return svmla_x (pg, scale, scale, poly);
}
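
/* Example use (a sketch; the variable names are illustrative). A caller whose
   inputs are known not to need special-case handling could do:

     static const struct sv_expf_data data = SV_EXPF_DATA;
     svfloat32_t y = expf_inline (x, svptrue_b32 (), &data);

   Inputs that do need special-case handling must be dealt with by the
   caller.  */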

#endif // PL_MATH_SV_EXPF_INLINE_H