//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//

#pragma once

#include <c10/util/bit_cast.h>

#include <algorithm>
#include <cassert>
#include <climits>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>

#ifdef __ANDROID_NDK__
#include <android/api-level.h>
#endif

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#ifndef LLVM_GNUC_PREREQ
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define LLVM_GNUC_PREREQ(maj, min, patch)                             \
  ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
   ((maj) << 20) + ((min) << 10) + (patch))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define LLVM_GNUC_PREREQ(maj, min, patch) \
  ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
#else
#define LLVM_GNUC_PREREQ(maj, min, patch) 0
#endif
#endif

#ifdef _MSC_VER
// Declare these intrinsics manually rather than including intrin.h. It's
// very expensive, and MathExtras.h is popular.
// #include <intrin.h>
extern "C" {
unsigned char _BitScanForward(unsigned long* _Index, unsigned long _Mask);
unsigned char _BitScanForward64(unsigned long* _Index, unsigned __int64 _Mask);
unsigned char _BitScanReverse(unsigned long* _Index, unsigned long _Mask);
unsigned char _BitScanReverse64(unsigned long* _Index, unsigned __int64 _Mask);
}
#endif

namespace c10::llvm {
/// The behavior an operation has on an input of 0.
enum ZeroBehavior {
  /// The returned value is undefined.
  ZB_Undefined,
  /// The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// The returned value is numeric_limits<T>::digits
  ZB_Width
};

namespace detail {
template <typename T, std::size_t SizeOfT>
struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER)
template <typename T>
struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T>
struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the least significant bit to the most
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
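
// Illustrative values for countTrailingZeros (editorial addition, not part of
// the upstream LLVM header). With the default ZB_Width behavior:
//   countTrailingZeros(uint32_t{0x00000010}) == 4   // bit 4 is the lowest set bit
//   countTrailingZeros(uint32_t{0})          == 32  // zero maps to the bit width
//   countTrailingZeros(uint64_t{1})          == 0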

namespace detail {
template <typename T, std::size_t SizeOfT>
struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if (defined(__GNUC__) && __GNUC__ >= 4) || defined(_MSC_VER)
template <typename T>
struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T>
struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the most significant bit to the least
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
template <typename T>
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
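
// Illustrative values for countLeadingZeros (editorial addition, not part of
// the upstream LLVM header). With the default ZB_Width behavior:
//   countLeadingZeros(uint32_t{0x00FF0000}) == 8    // highest set bit is bit 23
//   countLeadingZeros(uint32_t{0})          == 32   // zero maps to the bit width
//   countLeadingZeros(uint64_t{1})          == 63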

/// Get the index of the first set bit starting from the least
///   significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
template <typename T>
T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  return countTrailingZeros(Val, ZB_Undefined);
}

/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T>
T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned_v<T>, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}

/// Create a bitmask with the N left-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T>
T maskLeadingOnes(unsigned N) {
  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T>
T maskTrailingZeros(unsigned N) {
  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T>
T maskLeadingZeros(unsigned N) {
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
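
// Illustrative values for the mask helpers above (editorial addition, not part
// of the upstream LLVM header), shown for T = uint32_t and N = 4:
//   maskTrailingOnes<uint32_t>(4)  == 0x0000000F
//   maskLeadingOnes<uint32_t>(4)   == 0xF0000000
//   maskTrailingZeros<uint32_t>(4) == 0xFFFFFFF0
//   maskLeadingZeros<uint32_t>(4)  == 0x0FFFFFFF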

/// Get the index of the last set bit starting from the least
///   significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
template <typename T>
T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
      (std::numeric_limits<T>::digits - 1);
}

/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
/// NOLINTNEXTLINE(*c-arrays*)
static constexpr unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
    R6(0),
    R6(2),
    R6(1),
    R6(3)
#undef R2
#undef R4
#undef R6
};

/// Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
  // NOLINTNEXTLINE(*c-arrays*)
  unsigned char in[sizeof(Val)];
  // NOLINTNEXTLINE(*c-arrays*)
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
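
// Illustrative values for reverseBits (editorial addition, not part of the
// upstream LLVM header). The byte swap plus per-byte table lookup amounts to a
// full bit reversal of the value:
//   reverseBits(uint8_t{0x01})        == 0x80
//   reverseBits(uint16_t{0x0001})     == 0x8000
//   reverseBits(uint32_t{0x00000006}) == 0x60000000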

// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  return static_cast<uint32_t>(Value);
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return ((uint64_t)High << 32) | (uint64_t)Low;
}
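
// Editorial sketch: compile-time checks illustrating the split/join helpers
// above. These are additions for illustration, not part of the upstream LLVM
// header; they rely only on the constexpr functions defined in this file.
static_assert(Hi_32(0x0123456789ABCDEFull) == 0x01234567u, "Hi_32 example");
static_assert(Lo_32(0x0123456789ABCDEFull) == 0x89ABCDEFu, "Lo_32 example");
static_assert(
    Make_64(0x01234567u, 0x89ABCDEFu) == 0x0123456789ABCDEFull,
    "Make_64 reassembles the original value");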

/// Checks if an integer fits into the given bit width.
template <unsigned N>
constexpr inline bool isInt(int64_t x) {
  return N >= 64 ||
      (-(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1)));
}
// Template specializations to get better code for common cases.
template <>
constexpr inline bool isInt<8>(int64_t x) {
  return static_cast<int8_t>(x) == x;
}
template <>
constexpr inline bool isInt<16>(int64_t x) {
  return static_cast<int16_t>(x) == x;
}
template <>
constexpr inline bool isInt<32>(int64_t x) {
  return static_cast<int32_t>(x) == x;
}

/// Checks if a signed integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedInt(int64_t x) {
  static_assert(
      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number).");
  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
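
// Editorial sketch: compile-time checks illustrating isInt / isShiftedInt
// (not part of the upstream LLVM header).
static_assert(isInt<8>(127) && isInt<8>(-128), "8-bit signed range endpoints");
static_assert(!isInt<8>(128), "128 needs 9 bits as a signed value");
static_assert(
    isShiftedInt<8, 2>(508), "508 == 127 << 2: an 8-bit value shifted by 2");
static_assert(!isShiftedInt<8, 2>(510), "510 is not a multiple of 1 << 2");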

/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
template <unsigned N>
constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  return X < (UINT64_C(1) << (N));
}
template <unsigned N>
constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t /*X*/) {
  return true;
}

// Template specializations to get better code for common cases.
template <>
constexpr inline bool isUInt<8>(uint64_t x) {
  return static_cast<uint8_t>(x) == x;
}
template <>
constexpr inline bool isUInt<16>(uint64_t x) {
  return static_cast<uint16_t>(x) == x;
}
template <>
constexpr inline bool isUInt<32>(uint64_t x) {
  return static_cast<uint32_t>(x) == x;
}

/// Checks if an unsigned integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedUInt(uint64_t x) {
  static_assert(
      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
  static_assert(
      N + S <= 64, "isShiftedUInt<N, S> with N + S > 64 is too wide.");
  // Per the two static_asserts above, S must be strictly less than 64.  So
  // 1 << S is not undefined behavior.
  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
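
// Editorial sketch: compile-time checks illustrating isUInt / isShiftedUInt
// (not part of the upstream LLVM header).
static_assert(isUInt<8>(255) && !isUInt<8>(256), "8-bit unsigned range");
static_assert(isUInt<64>(UINT64_MAX), "every uint64_t fits in 64 bits");
static_assert(
    isShiftedUInt<8, 2>(1020), "1020 == 255 << 2: an 8-bit value shifted by 2");
static_assert(!isShiftedUInt<8, 2>(1022), "1022 is not a multiple of 1 << 2");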

/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64.  But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}

// Ignore MSVC's false-positive warning C4146 (unary minus applied to an
// unsigned type).
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4146)
#endif

/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");
  // NOLINTNEXTLINE(*-narrowing-conversions)
  return -(UINT64_C(1) << (N - 1));
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  // NOLINTNEXTLINE(*-narrowing-conversions)
  return (UINT64_C(1) << (N - 1)) - 1;
}

/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}

/// Checks if a signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
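
// Illustrative values for the dynamic-width helpers above (editorial addition,
// not part of the upstream LLVM header):
//   maxUIntN(8) == 255,  minIntN(8) == -128,  maxIntN(8) == 127
//   isUIntN(8, 200) == true    // 200 fits in 8 unsigned bits
//   isIntN(8, 200)  == false   // but needs 9 bits as a signed value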

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  return Value && isMask_32((Value - 1) | Value);
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value && isMask_64((Value - 1) | Value);
}

/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  return Value && !(Value & (Value - 1));
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - 1));
}
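
// Editorial sketch: compile-time checks illustrating the mask and power-of-two
// predicates above (not part of the upstream LLVM header).
static_assert(isMask_32(0x0000FFFFu) && !isMask_32(0x0000FF00u), "isMask_32");
static_assert(
    isShiftedMask_32(0x0000FF00u) && !isShiftedMask_32(0x00F000F0u),
    "isShiftedMask_32 allows a single shifted run of ones");
static_assert(
    isPowerOf2_64(uint64_t{1} << 40) && !isPowerOf2_64(12u), "isPowerOf2_64");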

/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return countLeadingZeros<T>(~Value, ZB);
}

/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return countTrailingZeros<T>(~Value, ZB);
}

namespace detail {
template <typename T, std::size_t SizeOfT>
struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__) && __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

template <typename T>
struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if defined(__GNUC__) && __GNUC__ >= 4
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(
      std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
      "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}

/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  return static_cast<unsigned>(31 - countLeadingZeros(Value));
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return static_cast<unsigned>(63 - countLeadingZeros(Value));
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  return static_cast<unsigned>(32 - countLeadingZeros(Value - 1));
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return static_cast<unsigned>(64 - countLeadingZeros(Value - 1));
}

/// Return the greatest common divisor of the values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  while (B) {
    uint64_t T = B;
    B = A % B;
    A = T;
  }
  return A;
}

/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  double D;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  memcpy(&D, &Bits, sizeof(Bits));
  return D;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  // TODO: Use std::bit_cast once C++20 becomes available.
  return c10::bit_cast<float>(Bits);
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  uint64_t Bits;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  memcpy(&Bits, &Double, sizeof(Double));
  return Bits;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  uint32_t Bits;
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  memcpy(&Bits, &Float, sizeof(Float));
  return Bits;
}

/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The largest power of 2 that divides both A and B.
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //    return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}
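
// Editorial sketch: compile-time checks for MinAlign (not part of the upstream
// LLVM header). The result is the largest power of two dividing both inputs:
static_assert(MinAlign(8, 12) == 4, "8 and 12 share 4-byte alignment");
static_assert(MinAlign(16, 0) == 16, "a zero offset does not reduce alignment");
static_assert(MinAlign(48, 32) == 16, "48 and 32 share 16-byte alignment");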

/// Aligns \c Addr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two.  This method rounds up, so
/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
inline uintptr_t alignAddr(const void* Addr, size_t Alignment) {
  assert(
      Alignment && isPowerOf2_64((uint64_t)Alignment) &&
      "Alignment is not a power of two!");

  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);

  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}

/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
/// bytes, rounding up.
inline size_t alignmentAdjustment(const void* Ptr, size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}

/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}

/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
inline uint64_t PowerOf2Floor(uint64_t A) {
  if (!A)
    return 0;
  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}

/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  return NextPowerOf2(A - 1);
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align>
constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}
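
// Editorial sketch: compile-time checks for the constexpr alignTo<Align>
// overload (not part of the upstream LLVM header):
static_assert(alignTo<8>(5) == 8, "rounds up to the next multiple of 8");
static_assert(alignTo<8>(16) == 16, "multiples are returned unchanged");
static_assert(alignTo<255>(321) == 510, "matches the runtime alignTo example");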

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return alignTo(Numerator, Denominator) / Denominator;
}

/// \c alignTo for contexts where a constant expression is required.
/// \sa alignTo
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");
  template <uint64_t Value>
  struct from_value {
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};

/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  return alignTo(Value, Align) - Value;
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B>
constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B>
constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  return int64_t(X << (64 - B)) >> (64 - B);
}
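
// Illustrative values for the sign-extension helpers above (editorial
// addition, not part of the upstream LLVM header):
//   SignExtend32<4>(0x0F)   == -1     // 4-bit 0b1111 is -1
//   SignExtend32(0x80u, 8)  == -128   // bit 7 is the sign bit of an 8-bit field
//   SignExtend64(0x1u, 1)   == -1     // a 1-bit field is either 0 or -1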

/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T> AbsoluteDifference(T X, T Y) {
  return std::max(X, Y) - std::min(X, Y);
}

/// Add two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingAdd(
    T X,
    T Y,
    bool* ResultOverflowed = nullptr) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  bool Dummy;
  bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29
  T Z = X + Y;
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}

/// Multiply two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingMultiply(
    T X,
    T Y,
    bool* ResultOverflowed = nullptr) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  bool Dummy;
  bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}

/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingMultiplyAdd(
    T X,
    T Y,
    T A,
    bool* ResultOverflowed = nullptr) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  bool Dummy;
  bool& Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}
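
// Illustrative values for the saturating helpers above (editorial addition,
// not part of the upstream LLVM header), shown for T = uint8_t:
//   SaturatingAdd<uint8_t>(200, 100)           == 255  // clamps, sets *ResultOverflowed
//   SaturatingMultiply<uint8_t>(16, 16)        == 255  // 256 does not fit
//   SaturatingMultiplyAdd<uint8_t>(10, 10, 50) == 150  // no overflow, exact result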

/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
extern const float huge_valf;
} // namespace c10::llvm