/*
 * Double-precision x^y function.
 *
 * Copyright (c) 2018-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "math_config.h"

/* Scalar version of pow used for fallbacks in vector implementations. */

/* Data is defined in v_pow_log_data.c. */
#define N_LOG (1 << V_POW_LOG_TABLE_BITS)
#define Off 0x3fe6955500000000
#define As __v_pow_log_data.poly

/* Data is defined in v_pow_exp_data.c. */
#define N_EXP (1 << V_POW_EXP_TABLE_BITS)
#define SignBias (0x800 << V_POW_EXP_TABLE_BITS)
#define SmallExp 0x3c9 /* top12(0x1p-54). */
#define BigExp 0x408 /* top12(512.0). */
#define ThresExp 0x03f /* BigExp - SmallExp. */
#define InvLn2N __v_pow_exp_data.n_over_ln2
#define Ln2HiN __v_pow_exp_data.ln2_over_n_hi
#define Ln2LoN __v_pow_exp_data.ln2_over_n_lo
#define SBits __v_pow_exp_data.sbits
#define Cs __v_pow_exp_data.poly

/* Constants associated with pow. */
#define SmallPowX 0x001 /* top12(0x1p-126). */
#define BigPowX 0x7ff /* top12(INFINITY). */
#define ThresPowX 0x7fe /* BigPowX - SmallPowX. */
#define SmallPowY 0x3be /* top12(0x1.e7b6p-65). */
#define BigPowY 0x43e /* top12(0x1.749p62). */
#define ThresPowY 0x080 /* BigPowY - SmallPowY. */

/* Top 12 bits of a double (sign and exponent bits). */
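/* E.g. top12 (-2.0) == 0xc00: the sign bit followed by the biased exponent
   0x400. */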
static inline uint32_t
top12 (double x)
{
  return asuint64 (x) >> 52;
}

/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
   15 extra bits of precision. IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent. */
static inline double
log_inline (uint64_t ix, double *tail)
{
  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center. */
  uint64_t tmp = ix - Off;
  int i = (tmp >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1);
  int k = (int64_t) tmp >> 52; /* arithmetic shift. */
  uint64_t iz = ix - (tmp & 0xfffULL << 52);
  double z = asdouble (iz);
  double kd = (double) k;

  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1). */
  double invc = __v_pow_log_data.invc[i];
  double logc = __v_pow_log_data.logc[i];
  double logctail = __v_pow_log_data.logctail[i];

  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable. */
  double r = fma (z, invc, -1.0);

  /* k*Ln2 + log(c) + r. */
  double t1 = kd * __v_pow_log_data.ln2_hi + logc;
  double t2 = t1 + r;
  double lo1 = kd * __v_pow_log_data.ln2_lo + logctail;
  double lo2 = t1 - t2 + r;
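  /* lo2 above is a Fast2Sum-style term that recovers the rounding error of
     t2 = t1 + r (exact when |t1| >= |r|). */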

  /* Evaluation is optimized assuming superscalar pipelined execution. */
  double ar = As[0] * r;
  double ar2 = r * ar;
  double ar3 = r * ar2;
  /* k*Ln2 + log(c) + r + A[0]*r*r. */
  double hi = t2 + ar2;
  double lo3 = fma (ar, r, -ar2);
  double lo4 = t2 - hi + ar2;
  /* p = log1p(r) - r - A[0]*r*r. */
  double p = (ar3
              * (As[1] + r * As[2]
                 + ar2 * (As[3] + r * As[4] + ar2 * (As[5] + r * As[6]))));
  double lo = lo1 + lo2 + lo3 + lo4 + p;
  double y = hi + lo;
  *tail = hi - y + lo;
  return y;
}

/* Handle cases that may overflow or underflow when computing the result
   scale*(1+TMP) without intermediate rounding. The bit representation of
   scale is in SBITS, but it has a computed exponent that may have
   overflowed into the sign bit, so that needs to be adjusted before using
   it as a double. (int32_t)KI is the k used in the argument reduction and
   exponent adjustment of scale; positive k here means the result may
   overflow and negative k means the result may underflow. */
static inline double
special_case (double tmp, uint64_t sbits, uint64_t ki)
{
  double scale, y;

  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by <= 460. */
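      /* Subtract 1009 from the exponent here and multiply the final result
         by 2^1009 so that the intermediate scale stays representable. */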
      sbits -= 1009ull << 52;
      scale = asdouble (sbits);
      y = 0x1p1009 * (scale + scale * tmp);
      return y;
    }
  /* k < 0, need special care in the subnormal range. */
  sbits += 1022ull << 52;
  /* Note: sbits is signed scale. */
  scale = asdouble (sbits);
  y = scale + scale * tmp;
#if WANT_SIMD_EXCEPT
  if (fabs (y) < 1.0)
    {
      /* Round y to the right precision before scaling it into the subnormal
         range to avoid double rounding that can cause 0.5+E/2 ulp error where
         E is the worst-case ulp error outside the subnormal range. So this
         is only useful if the goal is better than 1 ulp worst-case error. */
      double hi, lo, one = 1.0;
      if (y < 0.0)
        one = -1.0;
      lo = scale - y + scale * tmp;
      hi = one + y;
      lo = one - hi + y + lo;
      y = (hi + lo) - one;
      /* Fix the sign of 0. */
      if (y == 0.0)
        y = asdouble (sbits & 0x8000000000000000);
      /* The underflow exception needs to be signaled explicitly. */
      force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
    }
#endif
  y = 0x1p-1022 * y;
  return y;
}

/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
   The sign_bias argument is SignBias or 0 and sets the sign to -1 or 1. */
static inline double
exp_inline (double x, double xtail, uint32_t sign_bias)
{
  uint32_t abstop = top12 (x) & 0x7ff;
  if (unlikely (abstop - SmallExp >= ThresExp))
    {
      if (abstop - SmallExp >= 0x80000000)
        {
          /* Avoid spurious underflow for tiny x. */
          /* Note: 0 is common input. */
          return sign_bias ? -1.0 : 1.0;
        }
      if (abstop >= top12 (1024.0))
        {
          /* Note: inf and nan are already handled. */
          /* Skip errno handling. */
#if WANT_SIMD_EXCEPT
          return asuint64 (x) >> 63 ? __math_uflow (sign_bias)
                                    : __math_oflow (sign_bias);
#else
          double res_uoflow = asuint64 (x) >> 63 ? 0.0 : INFINITY;
          return sign_bias ? -res_uoflow : res_uoflow;
#endif
        }
      /* Large x is special cased below. */
      abstop = 0;
    }

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N]. */
  double z = InvLn2N * x;
  double kd = round (z);
  uint64_t ki = lround (z);
  double r = x - kd * Ln2HiN - kd * Ln2LoN;
  /* The code assumes 2^-200 < |xtail| < 2^-8/N. */
  r += xtail;
  /* 2^(k/N) ~= scale. */
  uint64_t idx = ki & (N_EXP - 1);
  uint64_t top = (ki + sign_bias) << (52 - V_POW_EXP_TABLE_BITS);
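  /* sign_bias, once shifted left by 52 - V_POW_EXP_TABLE_BITS, lands in bit
     63, so it sets the sign of scale (and hence of the result). */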
  /* This is only a valid scale when -1023*N < k < 1024*N. */
  uint64_t sbits = SBits[idx] + top;
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1). */
  /* Evaluation is optimized assuming superscalar pipelined execution. */
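  /* tmp below is r + Cs[0]*r^2 + Cs[1]*r^3 + Cs[2]*r^4 ~= expm1(r). */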
  double r2 = r * r;
  double tmp = r + r2 * Cs[0] + r * r2 * (Cs[1] + r * Cs[2]);
  if (unlikely (abstop == 0))
    return special_case (tmp, sbits, ki);
  double scale = asdouble (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma. */
  return scale + scale * tmp;
}

/* Computes exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
   A version of exp_inline that is not inlined and for which sign_bias is
   equal to 0. */
static double NOINLINE
exp_nosignbias (double x, double xtail)
{
  uint32_t abstop = top12 (x) & 0x7ff;
  if (unlikely (abstop - SmallExp >= ThresExp))
    {
      /* Avoid spurious underflow for tiny x. */
      if (abstop - SmallExp >= 0x80000000)
        return 1.0;
      /* Note: inf and nan are already handled. */
      if (abstop >= top12 (1024.0))
#if WANT_SIMD_EXCEPT
        return asuint64 (x) >> 63 ? __math_uflow (0) : __math_oflow (0);
#else
        return asuint64 (x) >> 63 ? 0.0 : INFINITY;
#endif
      /* Large x is special cased below. */
      abstop = 0;
    }

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
  /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N]. */
  double z = InvLn2N * x;
  double kd = round (z);
  uint64_t ki = lround (z);
  double r = x - kd * Ln2HiN - kd * Ln2LoN;
  /* The code assumes 2^-200 < |xtail| < 2^-8/N. */
  r += xtail;
  /* 2^(k/N) ~= scale. */
  uint64_t idx = ki & (N_EXP - 1);
  uint64_t top = ki << (52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N. */
  uint64_t sbits = SBits[idx] + top;
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (tail + exp(r) - 1). */
  double r2 = r * r;
  double tmp = r + r2 * Cs[0] + r * r2 * (Cs[1] + r * Cs[2]);
  if (unlikely (abstop == 0))
    return special_case (tmp, sbits, ki);
  double scale = asdouble (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma. */
  return scale + scale * tmp;
}

/* Returns 0 if not int, 1 if odd int, 2 if even int. The argument is
   the bit representation of a non-zero finite floating-point value. */
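/* E.g. checkint (asuint64 (2.5)) == 0, checkint (asuint64 (3.0)) == 1 and
   checkint (asuint64 (2.0)) == 2. */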
static inline int
checkint (uint64_t iy)
{
  int e = iy >> 52 & 0x7ff;
  if (e < 0x3ff)
    return 0;
  if (e > 0x3ff + 52)
    return 2;
  if (iy & ((1ULL << (0x3ff + 52 - e)) - 1))
    return 0;
  if (iy & (1ULL << (0x3ff + 52 - e)))
    return 1;
  return 2;
}

/* Returns 1 if input is the bit representation of 0, infinity or nan. */
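/* 2*i shifts out the sign bit and the subtraction wraps +-0 around to
   UINT64_MAX, so the comparison catches exactly +-0, +-inf and NaN. */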
static inline int
zeroinfnan (uint64_t i)
{
  return 2 * i - 1 >= 2 * asuint64 (INFINITY) - 1;
}

static double NOINLINE
pow_scalar_special_case (double x, double y)
{
  uint32_t sign_bias = 0;
  uint64_t ix, iy;
  uint32_t topx, topy;

  ix = asuint64 (x);
  iy = asuint64 (y);
  topx = top12 (x);
  topy = top12 (y);
  if (unlikely (topx - SmallPowX >= ThresPowX
                || (topy & 0x7ff) - SmallPowY >= ThresPowY))
    {
      /* Note: if |y| > 1075 * ln2 * 2^53 ~= 0x1.749p62 then pow(x,y) = inf/0
         and if |y| < 2^-54 / 1075 ~= 0x1.e7b6p-65 then pow(x,y) = +-1. */
      /* Special cases: (x < 0x1p-126 or inf or nan) or
         (|y| < 0x1p-65 or |y| >= 0x1p63 or nan). */
      if (unlikely (zeroinfnan (iy)))
        {
          if (2 * iy == 0)
            return issignaling_inline (x) ? x + y : 1.0;
          if (ix == asuint64 (1.0))
            return issignaling_inline (y) ? x + y : 1.0;
          if (2 * ix > 2 * asuint64 (INFINITY)
              || 2 * iy > 2 * asuint64 (INFINITY))
            return x + y;
          if (2 * ix == 2 * asuint64 (1.0))
            return 1.0;
          if ((2 * ix < 2 * asuint64 (1.0)) == !(iy >> 63))
            return 0.0; /* |x|<1 && y==inf or |x|>1 && y==-inf. */
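          /* Remaining cases: |x| < 1 && y == -inf or |x| > 1 && y == inf,
             for which y * y == inf. */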
          return y * y;
        }
      if (unlikely (zeroinfnan (ix)))
        {
          double x2 = x * x;
          if (ix >> 63 && checkint (iy) == 1)
            {
              x2 = -x2;
              sign_bias = 1;
            }
#if WANT_SIMD_EXCEPT
          if (2 * ix == 0 && iy >> 63)
            return __math_divzero (sign_bias);
#endif
          return iy >> 63 ? 1 / x2 : x2;
        }
      /* Here x and y are non-zero finite. */
      if (ix >> 63)
        {
          /* Finite x < 0. */
          int yint = checkint (iy);
          if (yint == 0)
#if WANT_SIMD_EXCEPT
            return __math_invalid (x);
#else
            return __builtin_nan ("");
#endif
          if (yint == 1)
            sign_bias = SignBias;
          ix &= 0x7fffffffffffffff;
          topx &= 0x7ff;
        }
      if ((topy & 0x7ff) - SmallPowY >= ThresPowY)
        {
          /* Note: sign_bias == 0 here because y is not odd. */
          if (ix == asuint64 (1.0))
            return 1.0;
          /* |y| < 2^-65, x^y ~= 1 + y*log(x). */
          if ((topy & 0x7ff) < SmallPowY)
            return 1.0;
#if WANT_SIMD_EXCEPT
          return (ix > asuint64 (1.0)) == (topy < 0x800) ? __math_oflow (0)
                                                         : __math_uflow (0);
#else
          return (ix > asuint64 (1.0)) == (topy < 0x800) ? INFINITY : 0;
#endif
        }
      if (topx == 0)
        {
          /* Normalize subnormal x so exponent becomes negative. */
          ix = asuint64 (x * 0x1p52);
          ix &= 0x7fffffffffffffff;
          ix -= 52ULL << 52;
        }
    }

  double lo;
  double hi = log_inline (ix, &lo);
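  /* Compute y*log(x) as a double-double: ehi is the rounded product y*hi and
     elo collects y*lo plus the rounding error of y*hi recovered with fma. */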
  double ehi = y * hi;
  double elo = y * lo + fma (y, hi, -ehi);
  return exp_inline (ehi, elo, sign_bias);
}