//===-- Double-precision e^x - 1 function ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "src/math/expm1.h"
#include "common_constants.h" // Lookup tables EXP2_MID1 and EXP2_MID2.
#include "explogxf.h"         // ziv_test_denorm.
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/nearest_integer.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/FPUtil/triple_double.h"
#include "src/__support/common.h"
#include "src/__support/integer_literals.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY

#if ((LIBC_MATH & LIBC_MATH_SKIP_ACCURATE_PASS) != 0)
#define LIBC_MATH_EXPM1_SKIP_ACCURATE_PASS
#endif
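// Note: when LIBC_MATH_EXPM1_SKIP_ACCURATE_PASS is defined, expm1 returns the
// result of the double-double pass directly and skips both the extra rounding
// test and the 128-bit accurate pass.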

namespace LIBC_NAMESPACE_DECL {

using fputil::DoubleDouble;
using fputil::TripleDouble;
using Float128 = typename fputil::DyadicFloat<128>;

using LIBC_NAMESPACE::operator""_u128;

// log2(e)
constexpr double LOG2_E = 0x1.71547652b82fep+0;

// Error bounds:
// Errors when using double precision: 0x1.8p-63.
constexpr uint64_t ERR_D = 0x3c08000000000000;
// Errors when using double-double precision: 0x1.0p-99.
[[maybe_unused]] constexpr uint64_t ERR_DD = 0x39c0000000000000;
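// ERR_D and ERR_DD are the bit patterns of the error bounds above.  They are
// added to and subtracted from the computed result to form the upper and lower
// bounds of the rounding test (Ziv's test) below: if both bounds round to the
// same double, that double is returned; otherwise a more accurate pass is run.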

// -2^-12 * log(2)
// > a = -2^-12 * log(2);
// > b = round(a, 30, RN);
// > c = round(a - b, 30, RN);
// > d = round(a - b - c, D, RN);
// Errors < 1.5 * 2^-133
constexpr double MLOG_2_EXP2_M12_HI = -0x1.62e42ffp-13;
constexpr double MLOG_2_EXP2_M12_MID = 0x1.718432a1b0e26p-47;
constexpr double MLOG_2_EXP2_M12_MID_30 = 0x1.718432ap-47;
constexpr double MLOG_2_EXP2_M12_LO = 0x1.b0e2633fe0685p-79;
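// The "> ..." lines above are the (Sollya-style) commands used to generate
// this 3-term split of -2^-12 * log(2): HI = b, MID_30 = c, LO = d, and MID is
// (a - b) rounded to double.  HI and MID_30 keep at most 30 significant bits,
// so their products with kd (an integer with |kd| < 2^23) are exact.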

namespace {

// Polynomial approximation with double precision:
// Return expm1(dx) / dx ~ 1 + dx / 2 + dx^2 / 6 + dx^3 / 24.
// For |dx| < 2^-13 + 2^-30:
//   | output - expm1(dx) / dx | < 2^-51.
LIBC_INLINE double poly_approx_d(double dx) {
  // dx^2
  double dx2 = dx * dx;
  // c0 = 1 + dx / 2
  double c0 = fputil::multiply_add(dx, 0.5, 1.0);
  // c1 = 1/6 + dx / 24
  double c1 =
      fputil::multiply_add(dx, 0x1.5555555555555p-5, 0x1.5555555555555p-3);
  // p = dx^2 * c1 + c0 = 1 + dx / 2 + dx^2 / 6 + dx^3 / 24
  double p = fputil::multiply_add(dx2, c1, c0);
  return p;
}

// Polynomial approximation with double-double precision:
// Return expm1(dx) / dx ~ 1 + dx / 2 + dx^2 / 6 + ... + dx^6 / 5040
// For |dx| < 2^-13 + 2^-30:
//   | output - expm1(dx) / dx | < 2^-101
DoubleDouble poly_approx_dd(const DoubleDouble &dx) {
  // Taylor polynomial.
  constexpr DoubleDouble COEFFS[] = {
      {0, 0x1p0},                                      // 1
      {0, 0x1p-1},                                     // 1/2
      {0x1.5555555555555p-57, 0x1.5555555555555p-3},   // 1/6
      {0x1.5555555555555p-59, 0x1.5555555555555p-5},   // 1/24
      {0x1.1111111111111p-63, 0x1.1111111111111p-7},   // 1/120
      {-0x1.f49f49f49f49fp-65, 0x1.6c16c16c16c17p-10}, // 1/720
      {0x1.a01a01a01a01ap-73, 0x1.a01a01a01a01ap-13},  // 1/5040
  };

  DoubleDouble p = fputil::polyeval(dx, COEFFS[0], COEFFS[1], COEFFS[2],
                                    COEFFS[3], COEFFS[4], COEFFS[5], COEFFS[6]);
  return p;
}

// Polynomial approximation with 128-bit precision:
// Return (exp(dx) - 1)/dx ~ 1 + dx / 2 + dx^2 / 6 + ... + dx^6 / 5040
// For |dx| < 2^-13 + 2^-30:
//   | output - (exp(dx) - 1)/dx | < 2^-126.
[[maybe_unused]] Float128 poly_approx_f128(const Float128 &dx) {
  constexpr Float128 COEFFS_128[]{
      {Sign::POS, -127, 0x80000000'00000000'00000000'00000000_u128}, // 1.0
      {Sign::POS, -128, 0x80000000'00000000'00000000'00000000_u128}, // 0.5
      {Sign::POS, -130, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/6
      {Sign::POS, -132, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/24
      {Sign::POS, -134, 0x88888888'88888888'88888888'88888889_u128}, // 1/120
      {Sign::POS, -137, 0xb60b60b6'0b60b60b'60b60b60'b60b60b6_u128}, // 1/720
      {Sign::POS, -140, 0xd00d00d0'0d00d00d'00d00d00'd00d00d0_u128}, // 1/5040
  };

  Float128 p = fputil::polyeval(dx, COEFFS_128[0], COEFFS_128[1], COEFFS_128[2],
                                COEFFS_128[3], COEFFS_128[4], COEFFS_128[5],
                                COEFFS_128[6]);
  return p;
}

#ifdef DEBUGDEBUG
std::ostream &operator<<(std::ostream &OS, const Float128 &r) {
  OS << (r.sign == Sign::NEG ? "-(" : "(") << r.mantissa.val[0] << " + "
     << r.mantissa.val[1] << " * 2^64) * 2^" << r.exponent << "\n";
  return OS;
}

std::ostream &operator<<(std::ostream &OS, const DoubleDouble &r) {
  OS << std::hexfloat << "(" << r.hi << " + " << r.lo << ")"
     << std::defaultfloat << "\n";
  return OS;
}
#endif

// Compute exp(x) - 1 using 128-bit precision.
// TODO(lntue): investigate triple-double precision implementation for this
// step.
[[maybe_unused]] Float128 expm1_f128(double x, double kd, int idx1, int idx2) {
  // Recalculate dx:

  double t1 = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double t2 = kd * MLOG_2_EXP2_M12_MID_30;                     // exact
  double t3 = kd * MLOG_2_EXP2_M12_LO;                         // Error < 2^-133

  Float128 dx = fputil::quick_add(
      Float128(t1), fputil::quick_add(Float128(t2), Float128(t3)));

  // TODO: Skip recalculating exp_mid1 and exp_mid2.
  Float128 exp_mid1 =
      fputil::quick_add(Float128(EXP2_MID1[idx1].hi),
                        fputil::quick_add(Float128(EXP2_MID1[idx1].mid),
                                          Float128(EXP2_MID1[idx1].lo)));

  Float128 exp_mid2 =
      fputil::quick_add(Float128(EXP2_MID2[idx2].hi),
                        fputil::quick_add(Float128(EXP2_MID2[idx2].mid),
                                          Float128(EXP2_MID2[idx2].lo)));

  Float128 exp_mid = fputil::quick_mul(exp_mid1, exp_mid2);

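  // hi = floor(k / 2^12) is the exponent of the final scaling by 2^hi, and
  // minus_one = -2^(-hi): after the result is scaled by 2^hi below
  // (r.exponent += hi), it contributes the "- 1" term of expm1.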
  int hi = static_cast<int>(kd) >> 12;
  Float128 minus_one{Sign::NEG, -127 - hi,
                     0x80000000'00000000'00000000'00000000_u128};

  Float128 exp_mid_m1 = fputil::quick_add(exp_mid, minus_one);

  Float128 p = poly_approx_f128(dx);

  // r = exp_mid * (1 + dx * P) - 1
  //   = (exp_mid - 1) + (dx * exp_mid) * P
  Float128 r =
      fputil::multiply_add(fputil::quick_mul(exp_mid, dx), p, exp_mid_m1);

  r.exponent += hi;

#ifdef DEBUGDEBUG
  std::cout << "=== VERY SLOW PASS ===\n"
            << "        kd: " << kd << "\n"
            << "        hi: " << hi << "\n"
            << " minus_one: " << minus_one << "        dx: " << dx
            << "exp_mid_m1: " << exp_mid_m1 << "   exp_mid: " << exp_mid
            << "         p: " << p << "         r: " << r << std::endl;
#endif

  return r;
}

// Compute exp(x) - 1 with double-double precision.
DoubleDouble exp_double_double(double x, double kd, const DoubleDouble &exp_mid,
                               const DoubleDouble &hi_part) {
  // Recalculate dx:
  //   dx = x - k * 2^-12 * log(2)
  double t1 = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double t2 = kd * MLOG_2_EXP2_M12_MID_30;                     // exact
  double t3 = kd * MLOG_2_EXP2_M12_LO;                         // Error < 2^-130

  DoubleDouble dx = fputil::exact_add(t1, t2);
  dx.lo += t3;

  // Degree-6 Taylor polynomial approximation in double-double precision.
  // | p - expm1(dx) / dx | < 2^-100.
  DoubleDouble p = poly_approx_dd(dx);

  // Error bounds: 2^-99.
  DoubleDouble r =
      fputil::multiply_add(fputil::quick_mult(exp_mid, dx), p, hi_part);

#ifdef DEBUGDEBUG
  std::cout << "=== SLOW PASS ===\n"
            << "   dx: " << dx << "    p: " << p << "    r: " << r << std::endl;
#endif

  return r;
}

// Check for exceptional cases when
// |x| <= 2^-53 or x < log(2^-54) or x >= 0x1.62e42fefa39fp+9
double set_exceptional(double x) {
  using FPBits = typename fputil::FPBits<double>;
  FPBits xbits(x);

  uint64_t x_u = xbits.uintval();
  uint64_t x_abs = xbits.abs().uintval();

  // |x| <= 2^-53.
  if (x_abs <= 0x3ca0'0000'0000'0000ULL) {
    // expm1(x) ~ x.

    if (LIBC_UNLIKELY(x_abs <= 0x0370'0000'0000'0000ULL)) {
      if (LIBC_UNLIKELY(x_abs == 0))
        return x;
      // |x| <= 2^-968, need to scale up a bit before rounding, then scale it
      // back down.
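      // The fused multiply-add computes x * 2^200 + 2^-1022 with a single
      // rounding.  Since expm1(x) > x for any nonzero x, the tiny positive
      // term nudges the rounding in the right direction, and the final scaling
      // by 2^-200 raises the underflow/inexact exceptions when needed.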
      return 0x1.0p-200 * fputil::multiply_add(x, 0x1.0p+200, 0x1.0p-1022);
    }

    // 2^-968 < |x| <= 2^-53.
    return fputil::round_result_slightly_up(x);
  }

  // x < log(2^-54) or x >= 0x1.62e42fefa39fp+9 or inf/nan.

  // x < log(2^-54) or -inf/nan
  if (x_u >= 0xc042'b708'8723'20e2ULL) {
    // expm1(-Inf) = -1
    if (xbits.is_inf())
      return -1.0;

    // exp(nan) = nan
    if (xbits.is_nan())
      return x;

    return fputil::round_result_slightly_up(-1.0);
  }

  // x >= round(log(MAX_NORMAL), D, RU) = 0x1.62e42fefa39fp+9 or +inf/nan
  // x is finite
  if (x_u < 0x7ff0'0000'0000'0000ULL) {
    int rounding = fputil::quick_get_round();
    if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO)
      return FPBits::max_normal().get_val();

    fputil::set_errno_if_required(ERANGE);
    fputil::raise_except_if_required(FE_OVERFLOW);
  }
  // x is +inf or nan
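  // For x = +inf this returns +inf; for NaN inputs the addition propagates the
  // NaN (and raises FE_INVALID for signaling NaNs).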
  return x + FPBits::inf().get_val();
}

} // namespace

LLVM_LIBC_FUNCTION(double, expm1, (double x)) {
  using FPBits = typename fputil::FPBits<double>;

  FPBits xbits(x);

  bool x_is_neg = xbits.is_neg();
  uint64_t x_u = xbits.uintval();

  // Upper bound: max normal number = 2^1023 * (2 - 2^-52)
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RU) = 0x1.62e42fefa39fp+9
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RD) = 0x1.62e42fefa39efp+9
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RN) = 0x1.62e42fefa39efp+9
  // > round(exp(0x1.62e42fefa39fp+9), D, RN) = infty

  // Lower bound: log(2^-54) = -0x1.2b708872320e2p5
  // > round(log(2^-54), D, RN) = -0x1.2b708872320e2p5

  // x < log(2^-54) or x >= 0x1.62e42fefa39fp+9 or |x| <= 2^-53.

  if (LIBC_UNLIKELY(x_u >= 0xc042b708872320e2 ||
                    (x_u <= 0xbca0000000000000 && x_u >= 0x40862e42fefa39f0) ||
                    x_u <= 0x3ca0000000000000)) {
    return set_exceptional(x);
  }

  // Now log(2^-54) <= x <= -2^-53 or 2^-53 <= x < log(2^1023 * (2 - 2^-52))

  // Range reduction:
  // Let x = log(2) * (hi + mid1 + mid2) + lo
  // in which:
  //   hi is an integer
  //   mid1 * 2^6 is an integer
  //   mid2 * 2^12 is an integer
  // then:
  //   exp(x) = 2^hi * 2^(mid1) * 2^(mid2) * exp(lo).
  // With this formula:
  //   - multiplying by 2^hi is exact and cheap, simply by adding the exponent
  //     field.
  //   - 2^(mid1) and 2^(mid2) are stored in 2 x 64-element tables.
  //   - exp(lo) ~ 1 + lo + a0 * lo^2 + ...
  //
  // They can be defined by:
  //   hi + mid1 + mid2 = 2^(-12) * round(2^12 * log_2(e) * x)
  // If we store L2E = round(log2(e), D, RN), then:
  //   log2(e) - L2E ~ 1.5 * 2^(-56)
  // So the errors when computing in double precision are:
  //   | x * 2^12 * log_2(e) - D(x * 2^12 * L2E) | <=
  //  <= | x * 2^12 * log_2(e) - x * 2^12 * L2E | +
  //     + | x * 2^12 * L2E - D(x * 2^12 * L2E) |
  //  <= 2^12 * ( |x| * 1.5 * 2^-56 + eps(x))  for RN
  //     2^12 * ( |x| * 1.5 * 2^-56 + 2*eps(x)) for other rounding modes.
  // So if:
  //   hi + mid1 + mid2 = 2^(-12) * round(x * 2^12 * L2E) is computed entirely
  // in double precision, the reduced argument:
  //   lo = x - log(2) * (hi + mid1 + mid2) is bounded by:
  //   |lo| <= 2^-13 + (|x| * 1.5 * 2^-56 + 2*eps(x))
  //         < 2^-13 + (1.5 * 2^9 * 1.5 * 2^-56 + 2*2^(9 - 52))
  //         < 2^-13 + 2^-41
  //

  // The following trick computes round(x * 2^12 * L2E) more efficiently
  // than using the rounding instructions, at the cost of some accuracy,
  // and hence a slightly larger range for the reduced argument `lo`.
  //
  // To be precise, since |x| < |log(2^-1075)| < 1.5 * 2^9,
  //   |x * 2^12 * L2E| < 1.5 * 2^9 * 1.5 * 2^12 < 2^23,
  // So we can fit the rounded result round(x * 2^12 * L2E) in int32_t.
  // Thus, the goal is to be able to use an additional addition and fixed width
  // shift to get an int32_t representing round(x * 2^12 * L2E).
  //
  // Assuming int32_t uses two's complement representation, and since the
  // mantissa of a double is unsigned with the leading bit hidden, if we add an
  // extra constant C = 2^e1 + 2^e2 with e1 > e2 >= 25 to the product, the bits
  // below 2^e2 in the resulting mantissa of (x*2^12*L2E + C) form a proper
  // two's complement representation of x*2^12*L2E.
  //
  // One small problem with this approach is that the sum (x*2^12*L2E + C) in
  // double precision is rounded at the least significant bit of the dominant
  // term C.  In order to minimize the rounding errors from this addition, we
  // want to minimize e1.  Another constraint is that, after shifting the
  // mantissa so that the least significant bit of the int32_t corresponds to
  // the unit bit of (x*2^12*L2E), the sign is correct without any adjustment.
  // Combining these 2 requirements, we can choose
  //   C = 2^33 + 2^32, so that the sign bit corresponds to the 2^31 bit, and
  // hence after right shifting the mantissa, the resulting int32_t has the
  // correct sign.
  // With this choice of C, the number of mantissa bits we need to shift to the
  // right is: 52 - 33 = 19.
  //
  // Moreover, since the integer right shifts are equivalent to rounding down,
  // we can add an extra 0.5 so that it will become round-to-nearest, tie-to-
  // +infinity.  So in particular, we can compute:
  //   hmm = x * 2^12 * L2E + C,
  // where C = 2^33 + 2^32 + 2^-1, then if
  //   k = int32_t(lower 51 bits of double(x * 2^12 * L2E + C) >> 19),
  // the reduced argument:
  //   lo = x - log(2) * 2^-12 * k is bounded by:
  //   |lo| <= 2^-13 + 2^-41 + 2^-12*2^-19
  //         = 2^-13 + 2^-31 + 2^-41.
  //
  // Finally, notice that k only uses the mantissa of x * 2^12 * L2E, so the
  // exponent 2^12 is not needed.  So we can simply define
  //   C = 2^(33 - 12) + 2^(32 - 12) + 2^(-1 - 12), and
  //   k = int32_t(lower 51 bits of double(x * L2E + C) >> 19).

  // Rounding errors <= 2^-31 + 2^-41.
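  // Here C = 2^(33 - 12) + 2^(32 - 12) + 2^(-1 - 12) = 0x1.8000'0000'4p21, and
  // the shift by 19 extracts k ~ round(x * 2^12 * L2E) from the low mantissa
  // bits of tmp as described above.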
  double tmp = fputil::multiply_add(x, LOG2_E, 0x1.8000'0000'4p21);
  int k = static_cast<int>(cpp::bit_cast<uint64_t>(tmp) >> 19);
  double kd = static_cast<double>(k);

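  // Decompose k = hi * 2^12 + idx1 * 2^6 + idx2 with 0 <= idx1, idx2 < 64, so
  // that x ~ log(2) * (hi + idx1 / 2^6 + idx2 / 2^12) + lo.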
  uint32_t idx1 = (k >> 6) & 0x3f;
  uint32_t idx2 = k & 0x3f;
  int hi = k >> 12;

  DoubleDouble exp_mid1{EXP2_MID1[idx1].mid, EXP2_MID1[idx1].hi};
  DoubleDouble exp_mid2{EXP2_MID2[idx2].mid, EXP2_MID2[idx2].hi};

  DoubleDouble exp_mid = fputil::quick_mult(exp_mid1, exp_mid2);

  // -2^(-hi)
  double one_scaled =
      FPBits::create_value(Sign::NEG, FPBits::EXP_BIAS - hi, 0).get_val();

  // 2^(mid1 + mid2) - 2^(-hi)
  DoubleDouble hi_part = x_is_neg ? fputil::exact_add(one_scaled, exp_mid.hi)
                                  : fputil::exact_add(exp_mid.hi, one_scaled);

  hi_part.lo += exp_mid.lo;

  // |x - (hi + mid1 + mid2) * log(2) - dx| < 2^11 * eps(M_LOG_2_EXP2_M12.lo)
  //                                        = 2^11 * 2^-13 * 2^-52
  //                                        = 2^-54.
  // |dx| < 2^-13 + 2^-30.
  double lo_h = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double dx = fputil::multiply_add(kd, MLOG_2_EXP2_M12_MID, lo_h);

  // We use the degree-4 Taylor polynomial to approximate exp(lo):
  //   exp(lo) ~ 1 + lo + lo^2 / 2 + lo^3 / 6 + lo^4 / 24 = 1 + lo * P(lo)
  // So that the errors are bounded by:
  //   |P(lo) - expm1(lo)/lo| < |lo|^4 / 64 < 2^(-13 * 4) / 64 = 2^-58
  // Let P_ be an evaluation of P where all intermediate computations are in
  // double precision.  Using either Horner's or Estrin's schemes, the evaluated
  // errors can be bounded by:
  //      |P_(dx) - P(dx)| < 2^-51
  //   => |dx * P_(dx) - expm1(lo) | < 1.5 * 2^-64
  //   => 2^(mid1 + mid2) * |dx * P_(dx) - expm1(lo)| < 1.5 * 2^-63.
  // Since we approximate
  //   2^(mid1 + mid2) ~ exp_mid.hi + exp_mid.lo,
  // We use the expression:
  //    (exp_mid.hi + exp_mid.lo) * (1 + dx * P_(dx)) ~
  //  ~ exp_mid.hi + (exp_mid.hi * dx * P_(dx) + exp_mid.lo)
  // with errors bounded by 1.5 * 2^-63.

  // Finally, we have the following approximation formula:
  //   expm1(x) = 2^hi * 2^(mid1 + mid2) * exp(lo) - 1
  //            = 2^hi * ( 2^(mid1 + mid2) * exp(lo) - 2^(-hi) )
  //            ~ 2^hi * ( (exp_mid.hi - 2^-hi) +
  //                       + (exp_mid.hi * dx * P_(dx) + exp_mid.lo))

  double mid_lo = dx * exp_mid.hi;

  // Approximate expm1(dx)/dx ~ 1 + dx / 2 + dx^2 / 6 + dx^3 / 24.
  double p = poly_approx_d(dx);

  double lo = fputil::multiply_add(p, mid_lo, hi_part.lo);

  // TODO: The following line leaks encoding abstraction. Use FPBits methods
  // instead.
  uint64_t err = x_is_neg ? (static_cast<uint64_t>(-hi) << 52) : 0;

  double err_d = cpp::bit_cast<double>(ERR_D + err);
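  // For negative x, hi <= 0 and |hi_part.hi| can be close to 2^(-hi), so the
  // error bound is scaled by 2^(-hi) by adding -hi to the exponent field of
  // ERR_D, keeping it roughly proportional to the intermediate result.  The
  // upper and lower values below form the rounding test of the fast pass.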

  double upper = hi_part.hi + (lo + err_d);
  double lower = hi_part.hi + (lo - err_d);

#ifdef DEBUGDEBUG
  std::cout << "=== FAST PASS ===\n"
            << "      x: " << std::hexfloat << x << std::defaultfloat << "\n"
            << "      k: " << k << "\n"
            << "   idx1: " << idx1 << "\n"
            << "   idx2: " << idx2 << "\n"
            << "     hi: " << hi << "\n"
            << "     dx: " << std::hexfloat << dx << std::defaultfloat << "\n"
            << "exp_mid: " << exp_mid << "hi_part: " << hi_part
            << " mid_lo: " << std::hexfloat << mid_lo << std::defaultfloat
            << "\n"
            << "      p: " << std::hexfloat << p << std::defaultfloat << "\n"
            << "     lo: " << std::hexfloat << lo << std::defaultfloat << "\n"
            << "  upper: " << std::hexfloat << upper << std::defaultfloat
            << "\n"
            << "  lower: " << std::hexfloat << lower << std::defaultfloat
            << "\n"
            << std::endl;
#endif

  if (LIBC_LIKELY(upper == lower)) {
    // to multiply by 2^hi, a fast way is to simply add hi to the exponent
    // field.
    int64_t exp_hi = static_cast<int64_t>(hi) << FPBits::FRACTION_LEN;
    double r = cpp::bit_cast<double>(exp_hi + cpp::bit_cast<int64_t>(upper));
    return r;
  }

  // Use double-double
  DoubleDouble r_dd = exp_double_double(x, kd, exp_mid, hi_part);

#ifdef LIBC_MATH_EXPM1_SKIP_ACCURATE_PASS
  int64_t exp_hi = static_cast<int64_t>(hi) << FPBits::FRACTION_LEN;
  double r =
      cpp::bit_cast<double>(exp_hi + cpp::bit_cast<int64_t>(r_dd.hi + r_dd.lo));
  return r;
#else
  double err_dd = cpp::bit_cast<double>(ERR_DD + err);

  double upper_dd = r_dd.hi + (r_dd.lo + err_dd);
  double lower_dd = r_dd.hi + (r_dd.lo - err_dd);

  if (LIBC_LIKELY(upper_dd == lower_dd)) {
    int64_t exp_hi = static_cast<int64_t>(hi) << FPBits::FRACTION_LEN;
    double r = cpp::bit_cast<double>(exp_hi + cpp::bit_cast<int64_t>(upper_dd));
    return r;
  }

  // Use 128-bit precision
  Float128 r_f128 = expm1_f128(x, kd, idx1, idx2);

  return static_cast<double>(r_f128);
#endif // LIBC_MATH_EXPM1_SKIP_ACCURATE_PASS
}

} // namespace LIBC_NAMESPACE_DECL