//===-- Implementation of cbrt function -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "src/math/cbrt.h"
#include "hdr/fenv_macros.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/common.h"
#include "src/__support/integer_literals.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA

#if ((LIBC_MATH & LIBC_MATH_SKIP_ACCURATE_PASS) != 0)
#define LIBC_MATH_CBRT_SKIP_ACCURATE_PASS
#endif

namespace LIBC_NAMESPACE_DECL {

using DoubleDouble = fputil::DoubleDouble;
using Float128 = fputil::DyadicFloat<128>;

namespace {

// Initial approximation of x^(-2/3) for 1 <= x < 2.
// Polynomial generated by Sollya with:
// > P = fpminimax(x^(-2/3), 7, [|D...|], [1, 2]);
// > dirtyinfnorm(P/x^(-2/3) - 1, [1, 2]);
// 0x1.28...p-21
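// The polynomial is evaluated below with an Estrin-like scheme to shorten the
// dependency chain of fused multiply-adds:
//   P(x) = d0 + x^4 * d1, where d0 = c0 + x^2 * c1, d1 = c2 + x^2 * c3,
//   and ci = COEFFS[2*i] + x * COEFFS[2*i + 1].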
double initial_approximation(double x) {
  constexpr double COEFFS[8] = {
      0x1.bc52aedead5c6p1, -0x1.b52bfebf110b3p2, 0x1.1d8d71d53d126p3,
      -0x1.de2db9e81cf87p2, 0x1.0154ca06153bdp2, -0x1.5973c66ee6da7p0,
      0x1.07bf6ac832552p-2, -0x1.5e53d9ce41cb8p-6,
  };

  double x_sq = x * x;

  double c0 = fputil::multiply_add(x, COEFFS[1], COEFFS[0]);
  double c1 = fputil::multiply_add(x, COEFFS[3], COEFFS[2]);
  double c2 = fputil::multiply_add(x, COEFFS[5], COEFFS[4]);
  double c3 = fputil::multiply_add(x, COEFFS[7], COEFFS[6]);

  double x_4 = x_sq * x_sq;
  double d0 = fputil::multiply_add(x_sq, c1, c0);
  double d1 = fputil::multiply_add(x_sq, c3, c2);

  return fputil::multiply_add(x_4, d1, d0);
}

// Get the error term for Newton iteration:
//   h(x) = x^3 * a^2 - 1,
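// With FMA, the term x_3.hi * a_sq.hi - 1 is computed with a single rounding,
// so the large cancellation loses no accuracy, and the second multiply_add
// folds in the cross terms x_3.lo * a_sq.hi and x_3.hi * a_sq.lo.  Without
// FMA, the full double-double product is formed first and the cancellation is
// performed on its high part.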
#ifdef LIBC_TARGET_CPU_HAS_FMA
double get_error(const DoubleDouble &x_3, const DoubleDouble &a_sq) {
  return fputil::multiply_add(x_3.hi, a_sq.hi, -1.0) +
         fputil::multiply_add(x_3.lo, a_sq.hi, x_3.hi * a_sq.lo);
}
#else
double get_error(const DoubleDouble &x_3, const DoubleDouble &a_sq) {
  DoubleDouble x_3_a_sq = fputil::quick_mult(a_sq, x_3);
  return (x_3_a_sq.hi - 1.0) + x_3_a_sq.lo;
}
#endif

} // anonymous namespace

// Correctly rounded cbrt algorithm:
//
// === Step 1 - Range reduction ===
// For x = (-1)^s * 2^e * (1.m), we get 2 reduced arguments x_r and a as:
//   x_r = 1.m
//   a   = (-1)^s * 2^(e % 3) * (1.m)
// Then cbrt(x) = x^(1/3) can be computed as:
//   x^(1/3) = 2^(e / 3) * a^(1/3).
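// For example, x = 2^10 * 1.75 has e = 10 and e % 3 = 1, so:
//   x_r = 1.75,  a = 2 * 1.75 = 3.5,  and  cbrt(x) = 2^3 * 3.5^(1/3).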
//
// In order to avoid division, we compute a^(-2/3) using Newton's method and
// then multiply the result by a:
//   a^(1/3) = a * a^(-2/3).
//
// === Step 2 - First approximation to a^(-2/3) ===
// First, we use a degree-7 minimax polynomial generated by Sollya to
// approximate x_r^(-2/3) for 1 <= x_r < 2:
//   p = P(x_r) ~ x_r^(-2/3),
// with relative errors bounded by:
//   | p / x_r^(-2/3) - 1 | < 1.16 * 2^-21.
//
// Then we multiply it by 2^(-2*(e % 3)/3), taken from a small lookup table, to
// get:
//   x_0 = 2^(-2*(e % 3)/3) * p
//       ~ 2^(-2*(e % 3)/3) * x_r^(-2/3)
//       = a^(-2/3)
// with relative errors:
//   | x_0 / a^(-2/3) - 1 | < 1.16 * 2^-21.
// This step is done in double precision.
//
// === Step 3 - First Newton iteration ===
// We follow the method described in:
//   Sibidanov, A. and Zimmermann, P., "Correctly rounded cubic root evaluation
//   in double precision", https://core-math.gitlabpages.inria.fr/cbrt64.pdf
// to derive multiplicative Newton iterations as below:
// Let x_n be the n-th approximation to a^(-2/3), and define the n-th error as:
//   h_n = x_n^3 * a^2 - 1.
// Then:
//   a^(-2/3) = x_n / (1 + h_n)^(1/3)
//            = x_n * (1 - (1/3) * h_n + (2/9) * h_n^2 - (14/81) * h_n^3 + ...)
// using the Taylor series expansion of (1 + h_n)^(-1/3).
//
// Applied to x_0 above:
//   h_0 = x_0^3 * a^2 - 1
//       = a^2 * (x_0 - a^(-2/3)) * (x_0^2 + x_0 * a^(-2/3) + a^(-4/3)),
// which is bounded by:
//   |h_0| < 4 * 3 * 1.16 * 2^-21 * 4 < 2^-17.
// So in the first iteration step, we use:
//   x_1 = x_0 * (1 - (1/3) * h_0 + (2/9) * h_0^2 - (14/81) * h_0^3).
// Its relative error is bounded by:
//   | x_1 / a^(-2/3) - 1 | < (35/243) * |h_0|^4 < 2^-70.
// Then we perform Ziv's rounding test and check if the answer is exact.
// This step is done in double-double precision.
//
// === Step 4 - Second Newton iteration ===
// If Ziv's rounding test from the previous step fails, we define the error
// term:
//   h_1 = x_1^3 * a^2 - 1,
// and perform another iteration:
//   x_2 = x_1 * (1 - h_1 / 3),
// with relative errors that exceed the precision of double-double.
// We then check Ziv's accuracy test with relative errors < 2^-102 to
// compensate for rounding errors.
//
// === Step 5 - Final iteration ===
// If Ziv's accuracy test from the previous step fails, we perform another
// iteration in 128-bit precision and check for exact outputs.
//
// TODO: It is possible to replace this costly computation step with special
// exceptional handling, similar to what was done in the CORE-MATH project:
// https://gitlab.inria.fr/core-math/core-math/-/blob/master/src/binary64/cbrt/cbrt.c

LLVM_LIBC_FUNCTION(double, cbrt, (double x)) {
  using FPBits = fputil::FPBits<double>;

  uint64_t x_abs = FPBits(x).abs().uintval();

  unsigned exp_bias_correction = 682; // 1023 * 2/3

  if (LIBC_UNLIKELY(x_abs < FPBits::min_normal().uintval() ||
                    x_abs >= FPBits::inf().uintval())) {
    if (x == 0.0 || x_abs >= FPBits::inf().uintval())
      // x is 0, Inf, or NaN.
      // Make sure it works for FTZ/DAZ modes.
      return static_cast<double>(x + x);

    // x is a non-zero denormal number.
    // Normalize x.
    x *= 0x1.0p60;
    exp_bias_correction -= 20; // 60 / 3 = 20.
  }

  FPBits x_bits(x);

  // When using the biased exponent of x in double precision:
  //   x_e = real_exponent_of_x + 1023
  // Then:
  //   x_e / 3 = real_exponent_of_x / 3 + 1023 / 3
  //           = real_exponent_of_x / 3 + 341
  // So to make it the correct biased exponent of x^(1/3), we add
  //   1023 - 341 = 682
  // to the quotient x_e / 3.
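  // For example, x = 2^10 has x_e = 1033, so x_e / 3 + 682 = 344 + 682 = 1026,
  // the biased exponent of 2^3, while the leftover factor 2^(x_e % 3) = 2^1 is
  // carried by the reduced argument a below.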
  unsigned x_e = static_cast<unsigned>(x_bits.get_biased_exponent());
  unsigned out_e = (x_e / 3 + exp_bias_correction);
  unsigned shift_e = x_e % 3;

  // Set x_r = 1.mantissa
  double x_r =
      FPBits(x_bits.get_mantissa() |
             (static_cast<uint64_t>(FPBits::EXP_BIAS) << FPBits::FRACTION_LEN))
          .get_val();

  // Set a = (-1)^x_sign * 2^(x_e % 3) * (1.mantissa)
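  // The mask keeps the sign bit and the 52 fraction bits of x while clearing
  // its exponent field, which is then replaced with the biased exponent of
  // 2^(x_e % 3).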
  uint64_t a_bits = x_bits.uintval() & 0x800F'FFFF'FFFF'FFFF;
  a_bits |=
      (static_cast<uint64_t>(shift_e + static_cast<unsigned>(FPBits::EXP_BIAS))
       << FPBits::FRACTION_LEN);
  double a = FPBits(a_bits).get_val();

  // Initial approximation of x_r^(-2/3).
  double p = initial_approximation(x_r);

  // Lookup table of 2^(-2*n/3) used for the first approximation step.
  constexpr double EXP2_M2_OVER_3[3] = {1.0, 0x1.428a2f98d728bp-1,
                                        0x1.965fea53d6e3dp-2};

  // x0 is an initial approximation of a^(-2/3) for 1 <= |a| < 8.
  // Relative error: < 1.16 * 2^(-21).
  double x0 = static_cast<double>(EXP2_M2_OVER_3[shift_e] * p);

  // First iteration in double precision.
  DoubleDouble a_sq = fputil::exact_mult(a, a);

  // h0 = x0^3 * a^2 - 1
  DoubleDouble x0_sq = fputil::exact_mult(x0, x0);
  DoubleDouble x0_3 = fputil::quick_mult(x0, x0_sq);

  double h0 = get_error(x0_3, a_sq);

#ifdef LIBC_MATH_CBRT_SKIP_ACCURATE_PASS
  constexpr double REL_ERROR = 0;
#else
  constexpr double REL_ERROR = 0x1.0p-51;
#endif // LIBC_MATH_CBRT_SKIP_ACCURATE_PASS

  // Taylor polynomial of (1 + h)^(-1/3):
  //   (1 + h)^(-1/3) = 1 - h/3 + 2 h^2 / 9 - 14 h^3 / 81 + ...
  constexpr double ERR_COEFFS[3] = {
      -0x1.5555555555555p-2 - REL_ERROR, // -1/3 - relative_error
      0x1.c71c71c71c71cp-3,              // 2/9
      -0x1.61f9add3c0ca4p-3,             // -14/81
  };
  // e0 = -14 * h^2 / 81 + 2 * h / 9 - 1/3 - relative_error.
  double e0 = fputil::polyeval(h0, ERR_COEFFS[0], ERR_COEFFS[1], ERR_COEFFS[2]);
  double x0_h0 = x0 * h0;

  // x1 = x0 * (1 - h0/3 + 2 h0^2 / 9 - 14 h0^3 / 81)
  // x1 approximates a^(-2/3) with relative errors bounded by:
  //   | x1 / a^(-2/3) - 1 | < (35/243) * h0^4 < |h0| * REL_ERROR
  DoubleDouble x1_dd{x0_h0 * e0, x0};

  // r1 = x1 * a ~ a^(-2/3) * a = a^(1/3).
  DoubleDouble r1 = fputil::quick_mult(a, x1_dd);

  // Lambda function to update the exponent of the result.
  auto update_exponent = [=](double r) -> double {
    uint64_t r_m = FPBits(r).uintval() - 0x3FF0'0000'0000'0000;
    // Adjust exponent and sign.
    uint64_t r_bits =
        r_m + (static_cast<uint64_t>(out_e) << FPBits::FRACTION_LEN);
    return FPBits(r_bits).get_val();
  };

#ifdef LIBC_MATH_CBRT_SKIP_ACCURATE_PASS
  // TODO: We probably don't need to use double-double if accurate tests and
  // passes are skipped.
  return update_exponent(r1.hi + r1.lo);
#else
  // Accurate checks and passes.
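  // Since REL_ERROR was folded into ERR_COEFFS[0] above, r1 corresponds to one
  // end of the error interval; r1_upper adds 2 * REL_ERROR * a * x0_h0 back to
  // reach the other end.  If both endpoints round to the same double, Ziv's
  // test passes and that double is the correctly rounded result.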
  double r1_lower = r1.hi + r1.lo;
  double r1_upper =
      r1.hi + fputil::multiply_add(x0_h0, 2.0 * REL_ERROR * a, r1.lo);

  // Ziv's accuracy test.
  if (LIBC_LIKELY(r1_upper == r1_lower)) {
    // Test for exact outputs.
    // Check if lower (52 - 17 = 35) bits are 0's.
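    // If cbrt(x) is exactly a double with n significant bits, then
    // x = cbrt(x)^3 needs at least 3 * n - 2 significant bits, so n <= 18 and
    // the lower 52 - 17 = 35 fraction bits of an exact result must be 0.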
    if (LIBC_UNLIKELY((FPBits(r1_lower).uintval() & 0x0000'0007'FFFF'FFFF) ==
                      0)) {
      double r1_err = (r1_lower - r1.hi) - r1.lo;
      if (FPBits(r1_err).abs().get_val() < 0x1.0p-69)
        fputil::clear_except_if_required(FE_INEXACT);
    }

    return update_exponent(r1_lower);
  }

  // Accuracy test failed, perform another Newton iteration.
  double x1 = x1_dd.hi + (e0 + REL_ERROR) * x0_h0;

  // Second iteration in double-double precision.
  // h1 = x1^3 * a^2 - 1.
  DoubleDouble x1_sq = fputil::exact_mult(x1, x1);
  DoubleDouble x1_3 = fputil::quick_mult(x1, x1_sq);
  double h1 = get_error(x1_3, a_sq);

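  // x1 is accurate to roughly 2^-52, so |h1| is at most about 3 * 2^-52 and
  // the quadratic Taylor term (2/9) * h1^2 ~ 2^-103 is negligible in
  // double-double precision; only the linear correction is applied below.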
  // e1 = -x1 * h1 / 3.
  double e1 = h1 * (x1 * -0x1.5555555555555p-2);
  // x2 = x1 * (1 - h1/3) = x1 + e1 ~ a^(-2/3) with relative errors < 2^-101.
  DoubleDouble x2 = fputil::exact_add(x1, e1);
  // r2 = a * x2 ~ a * a^(-2/3) = a^(1/3) with relative errors < 2^-100.
  DoubleDouble r2 = fputil::quick_mult(a, x2);

  double r2_upper = r2.hi + fputil::multiply_add(a, 0x1.0p-102, r2.lo);
  double r2_lower = r2.hi + fputil::multiply_add(a, -0x1.0p-102, r2.lo);

  // Ziv's accuracy test.
  if (LIBC_LIKELY(r2_upper == r2_lower))
    return update_exponent(r2_upper);

  // TODO: Investigate removing float128 and just list exceptional cases.
  // Apply another Newton iteration with ~126-bit accuracy.
  Float128 x2_f128 = fputil::quick_add(Float128(x2.hi), Float128(x2.lo));
  // x2^3
  Float128 x2_3 =
      fputil::quick_mul(fputil::quick_mul(x2_f128, x2_f128), x2_f128);
  // a^2
  Float128 a_sq_f128 = fputil::quick_mul(Float128(a), Float128(a));
  // x2^3 * a^2
  Float128 x2_3_a_sq = fputil::quick_mul(x2_3, a_sq_f128);
  // h2 = x2^3 * a^2 - 1
  Float128 h2_f128 = fputil::quick_add(x2_3_a_sq, Float128(-1.0));
  double h2 = static_cast<double>(h2_f128);
  // t2 = 1 - h2 / 3
  Float128 t2 =
      fputil::quick_add(Float128(1.0), Float128(h2 * (-0x1.5555555555555p-2)));
  // x3 = x2 * (1 - h2 / 3) ~ a^(-2/3)
  Float128 x3 = fputil::quick_mul(x2_f128, t2);
  // r3 = a * x3 ~ a * a^(-2/3) = a^(1/3)
  Float128 r3 = fputil::quick_mul(Float128(a), x3);

  // Check for exact cases:
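  // The low 74 of the 128 mantissa bits lie below the top 54 bits (the 53
  // significand bits of the double result plus one round bit).  If they are
  // within 2^4 ulps of all-zeros or all-ones, the result is treated as exact:
  // the mantissa is snapped to the nearest 54-bit value (rounding up in the
  // all-ones case) and the inexact exception is cleared.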
  Float128::MantissaType rounding_bits =
      r3.mantissa & 0x0000'0000'0000'03FF'FFFF'FFFF'FFFF'FFFF_u128;

  double result = static_cast<double>(r3);
  if ((rounding_bits < 0x0000'0000'0000'0000'0000'0000'0000'000F_u128) ||
      (rounding_bits >= 0x0000'0000'0000'03FF'FFFF'FFFF'FFFF'FFF0_u128)) {
    // Output is exact.
    r3.mantissa &= 0xFFFF'FFFF'FFFF'FFFF'FFFF'FFFF'FFFF'FFF0_u128;

    if (rounding_bits >= 0x0000'0000'0000'03FF'FFFF'FFFF'FFFF'FFF0_u128) {
      Float128 tmp{r3.sign, r3.exponent - 123,
                   0x8000'0000'0000'0000'0000'0000'0000'0000_u128};
      Float128 r4 = fputil::quick_add(r3, tmp);
      result = static_cast<double>(r4);
    } else {
      result = static_cast<double>(r3);
    }

    fputil::clear_except_if_required(FE_INEXACT);
  }

  return update_exponent(result);
#endif // LIBC_MATH_CBRT_SKIP_ACCURATE_PASS
}

} // namespace LIBC_NAMESPACE_DECL