1 /*
2  * Copyright 2019 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SKVX_DEFINED
9 #define SKVX_DEFINED
10 
11 // skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
12 //
13 // This time we're leaning a bit less on platform-specific intrinsics and a bit
14 // more on Clang/GCC vector extensions, but still keeping the option open to
15 // drop in platform-specific intrinsics, actually more easily than before.
16 //
17 // We've also fixed a few of the caveats that used to make SkNx awkward to work
18 // with across translation units.  skvx::Vec<N,T> always has N*sizeof(T) size
19 // and alignment and is safe to use across translation units freely.
20 // (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)
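//
// A quick usage sketch (illustrative only; aliases like skvx::float4 are defined at the
// bottom of this file):
//     skvx::Vec<4,float> v = {1.f, 2.f, 3.f, 4.f};   // unspecified lanes would fill with 0
//     v = v * 2.f + 1.f;                              // lane-wise math; scalars splat across lanes
//     float z = v[2];                                 // per-lane access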
21 
22 #include "include/private/base/SkFeatures.h"
23 #include "src/base/SkUtils.h"
24 #include <algorithm>         // std::min, std::max
25 #include <cassert>           // assert()
26 #include <cmath>             // ceilf, floorf, truncf, roundf, sqrtf, etc.
27 #include <cstdint>           // intXX_t
28 #include <cstring>           // memcpy()
29 #include <initializer_list>  // std::initializer_list
#include <limits>            // std::numeric_limits, used by saturated_add()
30 #include <type_traits>
31 #include <utility>           // std::index_sequence
32 
33 // Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
34 // The gn build has no option which sets SKNX_NO_SIMD.
35 // Use SKVX_USE_SIMD internally to avoid confusing double negation.
36 // Do not use 'defined' in a macro expansion.
37 #if !defined(SKNX_NO_SIMD)
38     #define SKVX_USE_SIMD 1
39 #else
40     #define SKVX_USE_SIMD 0
41 #endif
42 
43 #if SKVX_USE_SIMD
44     #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
45         #include <immintrin.h>
46     #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
47         #include <smmintrin.h>
48     #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
49         #include <xmmintrin.h>
50     #elif defined(SK_ARM_HAS_NEON)
51         #include <arm_neon.h>
52     #elif defined(__wasm_simd128__)
53         #include <wasm_simd128.h>
54     #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
55         #include <lasxintrin.h>
56         #include <lsxintrin.h>
57     #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
58         #include <lsxintrin.h>
59     #endif
60 #endif
61 
62 // To avoid ODR violations, all methods must be force-inlined...
63 #if defined(_MSC_VER)
64     #define SKVX_ALWAYS_INLINE __forceinline
65 #else
66     #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
67 #endif
68 
69 // ... and all standalone functions must be static.  Please use these helpers:
70 #define SI    static inline
71 #define SIT   template <       typename T> SI
72 #define SIN   template <int N            > SI
73 #define SINT  template <int N, typename T> SI
74 #define SINTU template <int N, typename T, typename U, \
75                         typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI
76 
77 namespace skvx {
78 
79 template <int N, typename T>
80 struct alignas(N*sizeof(T)) Vec;
81 
82 template <int... Ix, int N, typename T>
83 SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);
84 
85 // All Vec have the same simple memory layout, the same as `T vec[N]`.
86 template <int N, typename T>
87 struct alignas(N*sizeof(T)) Vec {
88     static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
89     static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
90 
91     // Methods belong here in the class declaration of Vec only if:
92     //   - they must be here, like constructors or operator[];
93     //   - they'll definitely never want a specialized implementation.
94     // Other operations on Vec should be defined outside the type.
95 
96     SKVX_ALWAYS_INLINE Vec() = default;
97     SKVX_ALWAYS_INLINE Vec(T s) : lo(s), hi(s) {}
98 
99     // NOTE: Vec{x} produces x000..., whereas Vec(x) produces xxxx.... since this constructor fills
100     // unspecified lanes with 0s, whereas the single T constructor fills all lanes with the value.
101     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
102         T vals[N] = {0};
103         assert(xs.size() <= (size_t)N);
104         memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));
105 
106         this->lo = Vec<N/2,T>::Load(vals +   0);
107         this->hi = Vec<N/2,T>::Load(vals + N/2);
108     }
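    // For example (illustrative): Vec<4,int>{7} is {7,0,0,0}, while Vec<4,int>(7) is {7,7,7,7}.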
109 
110     SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
111     SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
112 
113     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
114         return sk_unaligned_load<Vec>(ptr);
115     }
116     SKVX_ALWAYS_INLINE void store(void* ptr) const {
117         // Note: Calling sk_unaligned_store produces slightly worse code here, for some reason
118         memcpy(ptr, this, sizeof(Vec));
119     }
120 
121     Vec<N/2,T> lo, hi;
122 };
123 
124 // We have specializations for N == 1 (the base-case), as well as 2 and 4, where we add helpful
125 // constructors and swizzle accessors.
126 template <typename T>
127 struct alignas(4*sizeof(T)) Vec<4,T> {
128     static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
129 
130     SKVX_ALWAYS_INLINE Vec() = default;
131     SKVX_ALWAYS_INLINE Vec(T s) : lo(s), hi(s) {}
132     SKVX_ALWAYS_INLINE Vec(T x, T y, T z, T w) : lo(x,y), hi(z,w) {}
133     SKVX_ALWAYS_INLINE Vec(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
134     SKVX_ALWAYS_INLINE Vec(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
135     SKVX_ALWAYS_INLINE Vec(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}
136 
137     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
138         T vals[4] = {0};
139         assert(xs.size() <= (size_t)4);
140         memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)4)*sizeof(T));
141 
142         this->lo = Vec<2,T>::Load(vals + 0);
143         this->hi = Vec<2,T>::Load(vals + 2);
144     }
145 
146     SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<2 ? this->lo[i] : this->hi[i-2]; }
147     SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<2 ? this->lo[i] : this->hi[i-2]; }
148 
149     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
150         return sk_unaligned_load<Vec>(ptr);
151     }
152     SKVX_ALWAYS_INLINE void store(void* ptr) const {
153         memcpy(ptr, this, sizeof(Vec));
154     }
155 
156     SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
157     SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
158     SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
159     SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
160     SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
161     SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }
162 
163     SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
164     SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
165     SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
166     SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
167     SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
168     SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }
169 
170     // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
171     SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(*this); }
172     SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(*this); }
173 
174     Vec<2,T> lo, hi;
175 };
176 
177 template <typename T>
178 struct alignas(2*sizeof(T)) Vec<2,T> {
179     static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
180 
181     SKVX_ALWAYS_INLINE Vec() = default;
182     SKVX_ALWAYS_INLINE Vec(T s) : lo(s), hi(s) {}
183     SKVX_ALWAYS_INLINE Vec(T x, T y) : lo(x), hi(y) {}
184 
185     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
186         T vals[2] = {0};
187         assert(xs.size() <= (size_t)2);
188         memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)2)*sizeof(T));
189 
190         this->lo = Vec<1,T>::Load(vals + 0);
191         this->hi = Vec<1,T>::Load(vals + 1);
192     }
193 
194     SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<1 ? this->lo[i] : this->hi[i-1]; }
195     SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<1 ? this->lo[i] : this->hi[i-1]; }
196 
197     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
198         return sk_unaligned_load<Vec>(ptr);
199     }
200     SKVX_ALWAYS_INLINE void store(void* ptr) const {
201         memcpy(ptr, this, sizeof(Vec));
202     }
203 
204     SKVX_ALWAYS_INLINE T& x() { return lo.val; }
205     SKVX_ALWAYS_INLINE T& y() { return hi.val; }
206 
207     SKVX_ALWAYS_INLINE T x() const { return lo.val; }
208     SKVX_ALWAYS_INLINE T y() const { return hi.val; }
209 
210     // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
211     SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(*this); }
212     SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const { return Vec<4,T>(*this, *this); }
213 
214     Vec<1,T> lo, hi;
215 };
216 
217 template <typename T>
218 struct Vec<1,T> {
219     T val = {};
220 
221     SKVX_ALWAYS_INLINE Vec() = default;
222     SKVX_ALWAYS_INLINE Vec(T s) : val(s) {}
223 
224     SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {
225         assert(xs.size() <= (size_t)1);
226     }
227 
228     SKVX_ALWAYS_INLINE T  operator[](int i) const { assert(i == 0); return val; }
229     SKVX_ALWAYS_INLINE T& operator[](int i)       { assert(i == 0); return val; }
230 
231     SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
232         return sk_unaligned_load<Vec>(ptr);
233     }
234     SKVX_ALWAYS_INLINE void store(void* ptr) const {
235         memcpy(ptr, this, sizeof(Vec));
236     }
237 };
238 
239 // Translate from a value type T to its corresponding Mask, the result of a comparison.
240 template <typename T> struct Mask { using type = T; };
241 template <> struct Mask<float > { using type = int32_t; };
242 template <> struct Mask<double> { using type = int64_t; };
243 template <typename T> using M = typename Mask<T>::type;
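// For example (illustrative), comparing two Vec<4,float> produces a Vec<4,int32_t> whose lanes
// are all-1 bits (-1) where the comparison holds and 0 where it does not:
//     Vec<4,int32_t> m = Vec<4,float>(1.f) < Vec<4,float>(2.f);   // every lane is ~0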
244 
245 // Join two Vec<N,T> into one Vec<2N,T>.
246 SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
247     Vec<2*N,T> v;
248     v.lo = lo;
249     v.hi = hi;
250     return v;
251 }
252 
253 // We have three strategies for implementing Vec operations:
254 //    1) lean on Clang/GCC vector extensions when available;
255 //    2) use map() to apply a scalar function lane-wise;
256 //    3) recurse on lo/hi to scalar portable implementations.
257 // We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
258 // or often integrate them directly into the recursion of style 3), allowing fine control.
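//
// For illustration only (not part of this header), a hypothetical platform-specific overload
// could be slotted in like this; being a non-template overload, it would be preferred over the
// generic templates defined later in this file:
//     #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
//         SI Vec<4,float> min(const Vec<4,float>& x, const Vec<4,float>& y) {
//             return sk_bit_cast<Vec<4,float>>(_mm_min_ps(sk_bit_cast<__m128>(x),
//                                                         sk_bit_cast<__m128>(y)));
//         }
//     #endif
// (A real overload would also need to preserve the NaN semantics documented at min()/max() below.)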
259 
260 #if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))
261 
262     // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
263     #if defined(__clang__)
264         template <int N, typename T>
265         using VExt = T __attribute__((ext_vector_type(N)));
266 
267     #elif defined(__GNUC__)
268         template <int N, typename T>
269         struct VExtHelper {
270             typedef T __attribute__((vector_size(N*sizeof(T)))) type;
271         };
272 
273         template <int N, typename T>
274         using VExt = typename VExtHelper<N,T>::type;
275 
276         // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
277         // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
278         SI Vec<4,float> to_vec(VExt<4,float> v) { return sk_bit_cast<Vec<4,float>>(v); }
279     #endif
280 
281     SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return sk_bit_cast<VExt<N,T>>(v); }
282     SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return sk_bit_cast<Vec <N,T>>(v); }
283 
284     SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
285         return to_vec<N,T>(to_vext(x) + to_vext(y));
286     }
287     SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
288         return to_vec<N,T>(to_vext(x) - to_vext(y));
289     }
290     SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
291         return to_vec<N,T>(to_vext(x) * to_vext(y));
292     }
293     SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
294         return to_vec<N,T>(to_vext(x) / to_vext(y));
295     }
296 
297     SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
298         return to_vec<N,T>(to_vext(x) ^ to_vext(y));
299     }
300     SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
301         return to_vec<N,T>(to_vext(x) & to_vext(y));
302     }
303     SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
304         return to_vec<N,T>(to_vext(x) | to_vext(y));
305     }
306 
307     SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
308     SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
309     SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }
310 
311     SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
312     SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }
313 
314     SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
315         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
316     }
317     SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
318         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
319     }
320     SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
321         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
322     }
323     SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
324         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
325     }
326     SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
327         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) <  to_vext(y));
328     }
329     SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
330         return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) >  to_vext(y));
331     }
332 
333 #else
334 
335     // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
336     // We'll implement things portably with N==1 scalar implementations and recursion onto them.
337 
338     // N == 1 scalar implementations.
339     SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
340     SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
341     SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
342     SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }
343 
344     SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
345     SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
346     SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }
347 
348     SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
349     SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
350     SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }
351 
352     SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
353     SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }
354 
355     SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
356         return x.val == y.val ? ~0 : 0;
357     }
358     SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
359         return x.val != y.val ? ~0 : 0;
360     }
361     SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
362         return x.val <= y.val ? ~0 : 0;
363     }
364     SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
365         return x.val >= y.val ? ~0 : 0;
366     }
367     SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
368         return x.val <  y.val ? ~0 : 0;
369     }
370     SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
371         return x.val >  y.val ? ~0 : 0;
372     }
373 
374     // Recurse on lo/hi down to N==1 scalar implementations.
375     SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
376         return join(x.lo + y.lo, x.hi + y.hi);
377     }
378     SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
379         return join(x.lo - y.lo, x.hi - y.hi);
380     }
381     SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
382         return join(x.lo * y.lo, x.hi * y.hi);
383     }
384     SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
385         return join(x.lo / y.lo, x.hi / y.hi);
386     }
387 
388     SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
389         return join(x.lo ^ y.lo, x.hi ^ y.hi);
390     }
391     SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
392         return join(x.lo & y.lo, x.hi & y.hi);
393     }
394     SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
395         return join(x.lo | y.lo, x.hi | y.hi);
396     }
397 
398     SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
399     SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
400     SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }
401 
402     SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
403     SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }
404 
405     SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
406         return join(x.lo == y.lo, x.hi == y.hi);
407     }
408     SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
409         return join(x.lo != y.lo, x.hi != y.hi);
410     }
411     SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
412         return join(x.lo <= y.lo, x.hi <= y.hi);
413     }
414     SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
415         return join(x.lo >= y.lo, x.hi >= y.hi);
416     }
417     SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
418         return join(x.lo <  y.lo, x.hi <  y.hi);
419     }
420     SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
421         return join(x.lo >  y.lo, x.hi >  y.hi);
422     }
423 #endif
424 
425 // Scalar/vector operations splat the scalar to a vector.
426 SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
427 SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
428 SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
429 SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
430 SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
431 SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
432 SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
433 SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
434 SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
435 SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
436 SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
437 SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
438 SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }
439 
440 SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
441 SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
442 SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
443 SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
444 SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
445 SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
446 SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
447 SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
448 SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
449 SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
450 SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
451 SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
452 SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }
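// For example, with Vec<4,float> v, both 2.f * v and v + 1 compile and splat the scalar,
// as long as the scalar type is convertible to float (enforced by SINTU's enable_if above).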
453 
454 SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
455 SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
456 SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
457 SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
458 SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
459 SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
460 SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }
461 
462 SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
463 SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
464 SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
465 SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
466 SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
467 SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
468 SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }
469 
470 SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
471 SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }
472 
473 // Some operations we want are not expressible with Clang/GCC vector extensions.
474 
475 // Clang can reason about naive_if_then_else() and optimize through it better
476 // than if_then_else(), so it's sometimes useful to call it directly when we
477 // think an entire expression should optimize away, e.g. min()/max().
478 SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
479     return sk_bit_cast<Vec<N,T>>(( cond & sk_bit_cast<Vec<N, M<T>>>(t)) |
480                                  (~cond & sk_bit_cast<Vec<N, M<T>>>(e)) );
481 }
482 
483 SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
484     // In practice this scalar implementation is unlikely to be used.  See next if_then_else().
485     return sk_bit_cast<Vec<1,T>>(( cond & sk_bit_cast<Vec<1, M<T>>>(t)) |
486                                  (~cond & sk_bit_cast<Vec<1, M<T>>>(e)) );
487 }
488 SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
489     // Specializations inline here so they can generalize what types they apply to.
490 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
491     if constexpr (N*sizeof(T) == 32) {
492         return sk_bit_cast<Vec<N,T>>(_mm256_blendv_epi8(sk_bit_cast<__m256i>(e),
493                                                         sk_bit_cast<__m256i>(t),
494                                                         sk_bit_cast<__m256i>(cond)));
495     }
496 #endif
497 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
498     if constexpr (N*sizeof(T) == 16) {
499         return sk_bit_cast<Vec<N,T>>(_mm_blendv_epi8(sk_bit_cast<__m128i>(e),
500                                                      sk_bit_cast<__m128i>(t),
501                                                      sk_bit_cast<__m128i>(cond)));
502     }
503 #endif
504 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
505     if constexpr (N*sizeof(T) == 16) {
506         return sk_bit_cast<Vec<N,T>>(vbslq_u8(sk_bit_cast<uint8x16_t>(cond),
507                                               sk_bit_cast<uint8x16_t>(t),
508                                               sk_bit_cast<uint8x16_t>(e)));
509     }
510 #endif
511 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
512     if constexpr (N*sizeof(T) == 32) {
513         return sk_bit_cast<Vec<N,T>>(__lasx_xvbitsel_v(sk_bit_cast<__m256i>(e),
514                                                        sk_bit_cast<__m256i>(t),
515                                                        sk_bit_cast<__m256i>(cond)));
516     }
517 #endif
518 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
519     if constexpr (N*sizeof(T) == 16) {
520         return sk_bit_cast<Vec<N,T>>(__lsx_vbitsel_v(sk_bit_cast<__m128i>(e),
521                                                      sk_bit_cast<__m128i>(t),
522                                                      sk_bit_cast<__m128i>(cond)));
523     }
524 #endif
525     // Recurse for large vectors to try to hit the specializations above.
526     if constexpr (N*sizeof(T) > 16) {
527         return join(if_then_else(cond.lo, t.lo, e.lo),
528                     if_then_else(cond.hi, t.hi, e.hi));
529     }
530     // This default can lead to better code than recursing onto scalars.
531     return naive_if_then_else(cond, t, e);
532 }
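// For example (illustrative), clamping negative lanes of a Vec<4,float> x to zero:
//     Vec<4,float> clamped = if_then_else(x < 0.f, Vec<4,float>(0.f), x);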
533 
534 SIT  bool any(const Vec<1,T>& x) { return x.val != 0; }
535 SINT bool any(const Vec<N,T>& x) {
536     // For any(), the _mm_testz intrinsics are correct and don't require comparing 'x' to 0, so it's
537     // lower latency compared to _mm_movemask + _mm_cmpneq on plain SSE.
538 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
539     if constexpr (N*sizeof(T) == 32) {
540         return !_mm256_testz_si256(sk_bit_cast<__m256i>(x), _mm256_set1_epi32(-1));
541     }
542 #endif
543 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
544     if constexpr (N*sizeof(T) == 16) {
545         return !_mm_testz_si128(sk_bit_cast<__m128i>(x), _mm_set1_epi32(-1));
546     }
547 #endif
548 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
549     if constexpr (N*sizeof(T) == 16) {
550         // On SSE, movemask checks only the MSB in each lane, which is fine if the lanes were set
551         // directly from a comparison op (which sets all bits to 1 when true), but skvx::Vec<>
552         // treats any non-zero value as true, so we have to compare 'x' to 0 before calling movemask
553         return _mm_movemask_ps(_mm_cmpneq_ps(sk_bit_cast<__m128>(x), _mm_set1_ps(0))) != 0b0000;
554     }
555 #endif
556 #if SKVX_USE_SIMD && defined(__aarch64__)
557     // On 64-bit NEON, take the max across lanes, which will be non-zero if any lane was true.
558     // The specific lane-size doesn't really matter in this case since it's really any set bit
559     // that we're looking for.
560     if constexpr (N*sizeof(T) == 8 ) { return vmaxv_u8 (sk_bit_cast<uint8x8_t> (x)) > 0; }
561     if constexpr (N*sizeof(T) == 16) { return vmaxvq_u8(sk_bit_cast<uint8x16_t>(x)) > 0; }
562 #endif
563 #if SKVX_USE_SIMD && defined(__wasm_simd128__)
564     if constexpr (N == 4 && sizeof(T) == 4) {
565         return wasm_i32x4_any_true(sk_bit_cast<VExt<4,int>>(x));
566     }
567 #endif
568 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
569     if constexpr (N*sizeof(T) == 32) {
570         v8i32 retv = (v8i32)__lasx_xvmskltz_w(__lasx_xvslt_wu(__lasx_xvldi(0),
571                                                               sk_bit_cast<__m256i>(x)));
572         return (retv[0] | retv[4]) != 0b0000;
573     }
574 #endif
575 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
576     if constexpr (N*sizeof(T) == 16) {
577         v4i32 retv = (v4i32)__lsx_vmskltz_w(__lsx_vslt_wu(__lsx_vldi(0),
578                                                           sk_bit_cast<__m128i>(x)));
579         return retv[0] != 0b0000;
580     }
581 #endif
582     return any(x.lo)
583         || any(x.hi);
584 }
585 
586 SIT  bool all(const Vec<1,T>& x) { return x.val != 0; }
587 SINT bool all(const Vec<N,T>& x) {
588 // Unlike any(), we have to respect the lane layout, or we'll miss cases where a
589 // true lane has a mix of 0 and 1 bits.
590 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
591     // Unfortunately, the _mm_testc intrinsics don't let us avoid the comparison to 0 for all()'s
592     // correctness, so always just use the plain SSE version.
593     if constexpr (N == 4 && sizeof(T) == 4) {
594         return _mm_movemask_ps(_mm_cmpneq_ps(sk_bit_cast<__m128>(x), _mm_set1_ps(0))) == 0b1111;
595     }
596 #endif
597 #if SKVX_USE_SIMD && defined(__aarch64__)
598     // On 64-bit NEON, take the min across the lanes, which will be non-zero if all lanes are != 0.
599     if constexpr (sizeof(T)==1 && N==8)  {return vminv_u8  (sk_bit_cast<uint8x8_t> (x)) > 0;}
600     if constexpr (sizeof(T)==1 && N==16) {return vminvq_u8 (sk_bit_cast<uint8x16_t>(x)) > 0;}
601     if constexpr (sizeof(T)==2 && N==4)  {return vminv_u16 (sk_bit_cast<uint16x4_t>(x)) > 0;}
602     if constexpr (sizeof(T)==2 && N==8)  {return vminvq_u16(sk_bit_cast<uint16x8_t>(x)) > 0;}
603     if constexpr (sizeof(T)==4 && N==2)  {return vminv_u32 (sk_bit_cast<uint32x2_t>(x)) > 0;}
604     if constexpr (sizeof(T)==4 && N==4)  {return vminvq_u32(sk_bit_cast<uint32x4_t>(x)) > 0;}
605 #endif
606 #if SKVX_USE_SIMD && defined(__wasm_simd128__)
607     if constexpr (N == 4 && sizeof(T) == 4) {
608         return wasm_i32x4_all_true(sk_bit_cast<VExt<4,int>>(x));
609     }
610 #endif
611 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
612     if constexpr (N == 8 && sizeof(T) == 4) {
613         v8i32 retv = (v8i32)__lasx_xvmskltz_w(__lasx_xvslt_wu(__lasx_xvldi(0),
614                                                               sk_bit_cast<__m256i>(x)));
615         return (retv[0] & retv[4]) == 0b1111;
616     }
617 #endif
618 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
619     if constexpr (N == 4 && sizeof(T) == 4) {
620         v4i32 retv = (v4i32)__lsx_vmskltz_w(__lsx_vslt_wu(__lsx_vldi(0),
621                                                           sk_bit_cast<__m128i>(x)));
622         return retv[0] == 0b1111;
623     }
624 #endif
625     return all(x.lo)
626         && all(x.hi);
627 }
628 
629 // cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
630 // TODO: implement with map()?
631 template <typename D, typename S>
632 SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }
633 
634 template <typename D, int N, typename S>
635 SI Vec<N,D> cast(const Vec<N,S>& src) {
636 #if SKVX_USE_SIMD && defined(__clang__)
637     return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
638 #else
639     return join(cast<D>(src.lo), cast<D>(src.hi));
640 #endif
641 }
642 
643 // min/max match logic of std::min/std::max, which is important when NaN is involved.
644 SIT  T min(const Vec<1,T>& x) { return x.val; }
645 SIT  T max(const Vec<1,T>& x) { return x.val; }
646 SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
647 SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }
648 
649 SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
650 SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }
651 
652 SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
653 SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
654 SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
655 SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
656 
657 // pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
658 // values in the range lo..hi, and if x is NaN, it returns lo.
659 SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
660     return max(lo, min(x, hi));
661 }
662 
663 // Shuffle values from a vector pretty arbitrarily:
664 //    skvx::Vec<4,float> rgba = {R,G,B,A};
665 //    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
666 //    shuffle<2,1>            (rgba) ~> {B,G}
667 //    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
668 //    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
669 // The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
670 template <int... Ix, int N, typename T>
671 SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
672 #if SKVX_USE_SIMD && defined(__clang__)
673     // TODO: can we just always use { x[Ix]... }?
674     return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
675 #else
676     return { x[Ix]... };
677 #endif
678 }
679 
680 // Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
681 // or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.
682 
683 template <typename Fn, typename... Args, size_t... I>
684 SI auto map(std::index_sequence<I...>,
685             Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
686     auto lane = [&](size_t i)
687 #if defined(__clang__)
688     // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
689     // with errors like "control flow integrity check for type 'float (float)
690     // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
691     // here".  But we can be quite sure fn is the right type: it's all inferred!
692     // So, stifle CFI in this function.
693     __attribute__((no_sanitize("cfi")))
694 #endif
695     { return fn(args[static_cast<int>(i)]...); };
696 
697     return { lane(I)... };
698 }
699 
700 template <typename Fn, int N, typename T, typename... Rest>
701 auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
702     // Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
703     return map(std::make_index_sequence<N>{}, fn, first,rest...);
704 }
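// For example (illustrative), applying a scalar lambda to every lane of a Vec<4,float> v:
//     Vec<4,float> bumped = map([](float f) { return f + 1.f; }, v);
// The wrappers below (ceil, floor, sqrt, etc.) are thin uses of this same map().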
705 
706 SIN Vec<N,float>  ceil(const Vec<N,float>& x) { return map( ceilf, x); }
707 SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
708 SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
709 SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
710 SIN Vec<N,float>  sqrt(const Vec<N,float>& x) { return map( sqrtf, x); }
711 SIN Vec<N,float>   abs(const Vec<N,float>& x) { return map( fabsf, x); }
712 SIN Vec<N,float>   fma(const Vec<N,float>& x,
713                        const Vec<N,float>& y,
714                        const Vec<N,float>& z) {
715     // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly.
716     auto fn = [](float x, float y, float z) { return fmaf(x,y,z); };
717     return map(fn, x,y,z);
718 }
719 
720 SI Vec<1,int> lrint(const Vec<1,float>& x) {
721     return (int)lrintf(x.val);
722 }
723 SIN Vec<N,int> lrint(const Vec<N,float>& x) {
724 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
725     if constexpr (N == 8) {
726         return sk_bit_cast<Vec<N,int>>(_mm256_cvtps_epi32(sk_bit_cast<__m256>(x)));
727     }
728 #endif
729 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
730     if constexpr (N == 4) {
731         return sk_bit_cast<Vec<N,int>>(_mm_cvtps_epi32(sk_bit_cast<__m128>(x)));
732     }
733 #endif
734 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
735     if constexpr (N == 8) {
736         return sk_bit_cast<Vec<N,int>>(__lasx_xvftint_w_s(sk_bit_cast<__m256>(x)));
737     }
738 #endif
739 #if SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
740     if constexpr (N == 4) {
741         return sk_bit_cast<Vec<N,int>>(__lsx_vftint_w_s(sk_bit_cast<__m128>(x)));
742     }
743 #endif
744     return join(lrint(x.lo),
745                 lrint(x.hi));
746 }
747 
748 SIN Vec<N,float> fract(const Vec<N,float>& x) { return x - floor(x); }
749 
750 // Converts float to half, rounding to nearest even, and supporting de-normal f16 conversion,
751 // and overflow to f16 infinity. Should not be called with NaNs, since it can convert NaN->inf.
752 // KEEP IN SYNC with skcms' Half_from_F to ensure that f16 colors are computed consistently in both
753 // skcms and skvx.
754 SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
755     assert(all(x == x)); // No NaNs should reach this function
756 
757     // Intrinsics for float->half tend to operate on 4 lanes, and the default implementation has
758     // enough instructions that it's better to split and join on 128-bit groups vs.
759     // recursing for each min/max/shift/etc.
760     if constexpr (N > 4) {
761         return join(to_half(x.lo),
762                     to_half(x.hi));
763     }
764 
765 #if SKVX_USE_SIMD && defined(__aarch64__)
766     if constexpr (N == 4) {
767         return sk_bit_cast<Vec<N,uint16_t>>(vcvt_f16_f32(sk_bit_cast<float32x4_t>(x)));
768 
769     }
770 #endif
771 
772 #define I(x) sk_bit_cast<Vec<N,int32_t>>(x)
773 #define F(x) sk_bit_cast<Vec<N,float>>(x)
774     Vec<N,int32_t> sem = I(x),
775                    s   = sem & 0x8000'0000,
776                     em = min(sem ^ s, 0x4780'0000), // |x| clamped to f16 infinity
777                  // F(em)*8192 increases the exponent by 13, which when added back to em will shift
778                  // the mantissa bits 13 to the right. We clamp to 1/2 for subnormal values, which
779                  // automatically shifts the mantissa to match 2^-14 expected for a subnorm f16.
780                  magic = I(max(F(em) * 8192.f, 0.5f)) & (255 << 23),
781                rounded = I((F(em) + F(magic))), // shift mantissa with automatic round-to-even
782                    // Subtract 127 for f32 bias, subtract 13 to undo the *8192, subtract 1 to remove
783                    // the implicit leading 1., and add 15 to get the f16 biased exponent.
784                    exp = ((magic >> 13) - ((127-15+13+1)<<10)), // shift and re-bias exponent
785                    f16 = rounded + exp; // use + if 'rounded' rolled over into first exponent bit
786     return cast<uint16_t>((s>>16) | f16);
787 #undef I
788 #undef F
789 }
790 
791 // Converts from half to float, preserving NaN and +/- infinity.
792 // KEEP IN SYNC with skcms' F_from_Half to ensure that f16 colors are computed consistently in both
793 // skcms and skvx.
794 SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
795     if constexpr (N > 4) {
796         return join(from_half(x.lo),
797                     from_half(x.hi));
798     }
799 
800 #if SKVX_USE_SIMD && defined(__aarch64__)
801     if constexpr (N == 4) {
802         return sk_bit_cast<Vec<N,float>>(vcvt_f32_f16(sk_bit_cast<float16x4_t>(x)));
803     }
804 #endif
805 
806     Vec<N,int32_t> wide = cast<int32_t>(x),
807                       s  = wide & 0x8000,
808                       em = wide ^ s,
809               inf_or_nan =  (em >= (31 << 10)) & (255 << 23),  // Expands exponent to fill 8 bits
810                  is_norm =   em > 0x3ff,
811                      // subnormal f16's are 2^-14*0.[m0:9] == 2^-24*[m0:9].0
812                      sub = sk_bit_cast<Vec<N,int32_t>>((cast<float>(em) * (1.f/(1<<24)))),
813                     norm = ((em<<13) + ((127-15)<<23)), // Shifts mantissa, shifts + re-biases exp
814                   finite = (is_norm & norm) | (~is_norm & sub);
815     // If 'x' is f16 +/- infinity, inf_or_nan will be the filled 8-bit exponent but 'norm' will be
816     // all 0s since 'x's mantissa is 0. Thus norm | inf_or_nan becomes f32 infinity. However, if
817     // 'x' is an f16 NaN, some bits of 'norm' will be non-zero, so it stays an f32 NaN after the OR.
818     return sk_bit_cast<Vec<N,float>>((s<<16) | finite | inf_or_nan);
819 }
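// For example (illustrative): to_half(Vec<4,float>(1.0f)) is 0x3C00 in every lane, and
// from_half(Vec<4,uint16_t>(0x3C00)) round-trips back to 1.0f.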
820 
821 // div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
822 SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
823     return cast<uint8_t>( (x+127)/255 );
824 }
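// For example, div255(Vec<4,uint16_t>(127)) is 0 and div255(Vec<4,uint16_t>(128)) is 1,
// matching round(127/255.0) and round(128/255.0); div255 of 255*255 is exactly 255.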
825 
826 // approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
827 // and is always perfect when x or y is 0 or 255.
828 SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
829     // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
830     // We happen to have historically picked (x*y+x)/256.
831     auto X = cast<uint16_t>(x),
832          Y = cast<uint16_t>(y);
833     return cast<uint8_t>( (X*Y+X)/256 );
834 }
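// For example, with all lanes equal: approx_scale(255, 255) == 255 and approx_scale(255, 0) == 0
// exactly, while approx_scale(128, 128) == 64, the same as div255(128*128) == 64.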
835 
836 // saturated_add(x,y) sums values and clamps to the maximum value instead of overflowing.
837 SINT std::enable_if_t<std::is_unsigned_v<T>, Vec<N,T>> saturated_add(const Vec<N,T>& x,
838                                                                      const Vec<N,T>& y) {
839 #if SKVX_USE_SIMD && (SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 || defined(SK_ARM_HAS_NEON) || \
840         SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX)
841     // Both SSE and ARM have 16-lane saturated adds, so use intrinsics for those and recurse down
842     // or join up to take advantage.
843     if constexpr (N == 16 && sizeof(T) == 1) {
844         #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
845         return sk_bit_cast<Vec<N,T>>(_mm_adds_epu8(sk_bit_cast<__m128i>(x),
846                                                    sk_bit_cast<__m128i>(y)));
847         #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
848         return sk_bit_cast<Vec<N,T>>(__lsx_vsadd_bu(sk_bit_cast<__m128i>(x),
849                                                     sk_bit_cast<__m128i>(y)));
850         #else  // SK_ARM_HAS_NEON
851         return sk_bit_cast<Vec<N,T>>(vqaddq_u8(sk_bit_cast<uint8x16_t>(x),
852                                                sk_bit_cast<uint8x16_t>(y)));
853         #endif
854     } else if constexpr (N < 16 && sizeof(T) == 1) {
855         return saturated_add(join(x,x), join(y,y)).lo;
856     } else if constexpr (sizeof(T) == 1) {
857         return join(saturated_add(x.lo, y.lo), saturated_add(x.hi, y.hi));
858     }
859 #endif
860     // Otherwise saturate manually
861     auto sum = x + y;
862     return if_then_else(sum < x, Vec<N,T>(std::numeric_limits<T>::max()), sum);
863 }
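// For example, with uint8_t lanes, saturated_add(Vec<4,uint8_t>(200), Vec<4,uint8_t>(100))
// returns 255 in every lane instead of wrapping around to 44.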
864 
865 // The ScaledDividerU32 takes a divisor > 1 and creates a function divide(numerator) that
866 // calculates numerator / divisor. For this to be rounded properly, the numerator should have
867 // half added in:
868 // divide(numerator + half) == floor(numerator/divisor + 1/2).
869 //
870 // This gives an answer within +/- 1 of the true value.
871 //
872 // Derivation of half:
873 //    numerator/divisor + 1/2 = (numerator + half) / divisor
874 //    numerator + divisor / 2 = numerator + half
875 //    half = divisor / 2.
876 //
877 // Because computing half requires a division by 2, that division must also be rounded:
878 //    half == divisor / 2 == (divisor + 1) / 2.
879 //
880 // The divisorFactor is just a scaled value:
881 //    divisorFactor = (1 / divisor) * 2 ^ 32.
882 // The maximum that can be divided and rounded is UINT_MAX - half.
883 class ScaledDividerU32 {
884 public:
885     explicit ScaledDividerU32(uint32_t divisor)
886             : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
887             , fHalf{(divisor + 1) >> 1} {
888         assert(divisor > 1);
889     }
890 
891     Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
892 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
893         uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
894         uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)),  fDivisorFactor);
895 
896         return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
897 #else
898         return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
899 #endif
900     }
901 
902     uint32_t half() const { return fHalf; }
903     uint32_t divisorFactor() const { return fDivisorFactor; }
904 
905 private:
906     const uint32_t fDivisorFactor;
907     const uint32_t fHalf;
908 };
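// A usage sketch (illustrative): dividing by 3 with rounding.
//     ScaledDividerU32 div3(3);                                     // half() == 2
//     Vec<4, uint32_t> q = div3.divide(Vec<4, uint32_t>(7) + div3.half());
//     // expected to be 2 per lane, i.e. floor(7/3 + 1/2), within the +/- 1 noted above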
909 
910 
911 SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
912                          const Vec<N,uint8_t>& y) {
913 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
914     // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
915     if constexpr (N == 8) {
916         return to_vec<8,uint16_t>(vmull_u8(to_vext(x), to_vext(y)));
917     } else if constexpr (N < 8) {
918         return mull(join(x,x), join(y,y)).lo;
919     } else { // N > 8
920         return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
921     }
922 #else
923     return cast<uint16_t>(x) * cast<uint16_t>(y);
924 #endif
925 }
926 
927 SIN Vec<N,uint32_t> mull(const Vec<N,uint16_t>& x,
928                          const Vec<N,uint16_t>& y) {
929 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
930     // NEON can do four u16*u16 -> u32 in one instruction, vmull_u16
931     if constexpr (N == 4) {
932         return to_vec<4,uint32_t>(vmull_u16(to_vext(x), to_vext(y)));
933     } else if constexpr (N < 4) {
934         return mull(join(x,x), join(y,y)).lo;
935     } else { // N > 4
936         return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
937     }
938 #else
939     return cast<uint32_t>(x) * cast<uint32_t>(y);
940 #endif
941 }
942 
943 SIN Vec<N,uint16_t> mulhi(const Vec<N,uint16_t>& x,
944                           const Vec<N,uint16_t>& y) {
945 #if SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
946     // Use _mm_mulhi_epu16 for 8xuint16_t and join or split to get there.
947     if constexpr (N == 8) {
948         return sk_bit_cast<Vec<8,uint16_t>>(_mm_mulhi_epu16(sk_bit_cast<__m128i>(x),
949                                                             sk_bit_cast<__m128i>(y)));
950     } else if constexpr (N < 8) {
951         return mulhi(join(x,x), join(y,y)).lo;
952     } else { // N > 8
953         return join(mulhi(x.lo, y.lo), mulhi(x.hi, y.hi));
954     }
955 #elif SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
956     if constexpr (N == 8) {
957         return sk_bit_cast<Vec<8,uint16_t>>(__lsx_vmuh_hu(sk_bit_cast<__m128i>(x),
958                                                           sk_bit_cast<__m128i>(y)));
959     } else if constexpr (N < 8) {
960         return mulhi(join(x,x), join(y,y)).lo;
961     } else { // N > 8
962         return join(mulhi(x.lo, y.lo), mulhi(x.hi, y.hi));
963     }
964 #else
965     return skvx::cast<uint16_t>(mull(x, y) >> 16);
966 #endif
967 }
968 
969 SINT T dot(const Vec<N, T>& a, const Vec<N, T>& b) {
970     // While dot is a "horizontal" operation like any or all, it needs to remain
971     // in floating point and there aren't really any good SIMD instructions that make it faster.
972     // The constexpr cases remove the for loop in the only cases we realistically call.
973     auto ab = a*b;
974     if constexpr (N == 2) {
975         return ab[0] + ab[1];
976     } else if constexpr (N == 4) {
977         return ab[0] + ab[1] + ab[2] + ab[3];
978     } else {
979         T sum = ab[0];
980         for (int i = 1; i < N; ++i) {
981             sum += ab[i];
982         }
983         return sum;
984     }
985 }
986 
987 SIT T cross(const Vec<2, T>& a, const Vec<2, T>& b) {
988     auto x = a * shuffle<1,0>(b);
989     return x[0] - x[1];
990 }
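// For example, dot(Vec<4,float>{1,2,3,4}, Vec<4,float>(1.f)) == 10, and
// cross(Vec<2,float>{1,0}, Vec<2,float>{0,1}) == 1 (the z-component of the 2D cross product).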
991 
992 SIN float length(const Vec<N, float>& v) {
993     return std::sqrt(dot(v, v));
994 }
995 
996 SIN double length(const Vec<N, double>& v) {
997     return std::sqrt(dot(v, v));
998 }
999 
1000 SIN Vec<N, float> normalize(const Vec<N, float>& v) {
1001     return v / length(v);
1002 }
1003 
1004 SIN Vec<N, double> normalize(const Vec<N, double>& v) {
1005     return v / length(v);
1006 }
1007 
1008 SINT bool isfinite(const Vec<N, T>& v) {
1009     // Multiply all values together with 0. If they were all finite, the output is
1010     // 0 (also finite). If any were not, we'll get nan.
1011     return SkIsFinite(dot(v, Vec<N, T>(0)));
1012 }
1013 
1014 // De-interleaving load of 4 vectors.
1015 //
1016 // WARNING: These are really only supported well on NEON. Consider restructuring your data before
1017 // resorting to these methods.
1018 SIT void strided_load4(const T* v,
1019                        Vec<1,T>& a,
1020                        Vec<1,T>& b,
1021                        Vec<1,T>& c,
1022                        Vec<1,T>& d) {
1023     a.val = v[0];
1024     b.val = v[1];
1025     c.val = v[2];
1026     d.val = v[3];
1027 }
1028 SINT void strided_load4(const T* v,
1029                         Vec<N,T>& a,
1030                         Vec<N,T>& b,
1031                         Vec<N,T>& c,
1032                         Vec<N,T>& d) {
1033     strided_load4(v, a.lo, b.lo, c.lo, d.lo);
1034     strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
1035 }
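// For example (illustrative), de-interleaving 8 interleaved RGBA8888 pixels into channel planes:
//     const uint8_t* px = ...;           // 32 interleaved bytes: R,G,B,A, R,G,B,A, ...
//     Vec<8,uint8_t> r, g, b, a;
//     strided_load4(px, r, g, b, a);     // r = {px[0], px[4], ..., px[28]}, and so on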
1036 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
1037 #define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \
1038 SI void strided_load4(const T* v, \
1039                       Vec<N,T>& a, \
1040                       Vec<N,T>& b, \
1041                       Vec<N,T>& c, \
1042                       Vec<N,T>& d) { \
1043     auto mat = VLD(v); \
1044     a = sk_bit_cast<Vec<N,T>>(mat.val[0]); \
1045     b = sk_bit_cast<Vec<N,T>>(mat.val[1]); \
1046     c = sk_bit_cast<Vec<N,T>>(mat.val[2]); \
1047     d = sk_bit_cast<Vec<N,T>>(mat.val[3]); \
1048 }
1049 IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
1050 IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
1051 IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
1052 IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
1053 IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
1054 IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
1055 IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
1056 IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
1057 IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
1058 IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
1059 IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
1060 IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
1061 IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
1062 IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
1063 #undef IMPL_LOAD4_TRANSPOSED
1064 
1065 #elif SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
1066 
1067 SI void strided_load4(const float* v,
1068                       Vec<4,float>& a,
1069                       Vec<4,float>& b,
1070                       Vec<4,float>& c,
1071                       Vec<4,float>& d) {
1072     __m128 a_ = _mm_loadu_ps(v);
1073     __m128 b_ = _mm_loadu_ps(v+4);
1074     __m128 c_ = _mm_loadu_ps(v+8);
1075     __m128 d_ = _mm_loadu_ps(v+12);
1076     _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
1077     a = sk_bit_cast<Vec<4,float>>(a_);
1078     b = sk_bit_cast<Vec<4,float>>(b_);
1079     c = sk_bit_cast<Vec<4,float>>(c_);
1080     d = sk_bit_cast<Vec<4,float>>(d_);
1081 }
1082 
1083 #elif SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
1084 #define _LSX_TRANSPOSE4(row0, row1, row2, row3) \
1085 do {                                            \
1086     __m128i __t0 = __lsx_vilvl_w (row1, row0);  \
1087     __m128i __t1 = __lsx_vilvl_w (row3, row2);  \
1088     __m128i __t2 = __lsx_vilvh_w (row1, row0);  \
1089     __m128i __t3 = __lsx_vilvh_w (row3, row2);  \
1090     (row0) = __lsx_vilvl_d (__t1, __t0);        \
1091     (row1) = __lsx_vilvh_d (__t1, __t0);        \
1092     (row2) = __lsx_vilvl_d (__t3, __t2);        \
1093     (row3) = __lsx_vilvh_d (__t3, __t2);        \
1094 } while (0)
1095 
1096 SI void strided_load4(const int* v,
1097                       Vec<4,int>& a,
1098                       Vec<4,int>& b,
1099                       Vec<4,int>& c,
1100                       Vec<4,int>& d) {
1101     __m128i a_ = __lsx_vld(v, 0);
1102     __m128i b_ = __lsx_vld(v, 16);
1103     __m128i c_ = __lsx_vld(v, 32);
1104     __m128i d_ = __lsx_vld(v, 48);
1105     _LSX_TRANSPOSE4(a_, b_, c_, d_);
1106     a = sk_bit_cast<Vec<4,int>>(a_);
1107     b = sk_bit_cast<Vec<4,int>>(b_);
1108     c = sk_bit_cast<Vec<4,int>>(c_);
1109     d = sk_bit_cast<Vec<4,int>>(d_);
1110 }
1111 #endif
1112 
1113 // De-interleaving load of 2 vectors.
1114 //
1115 // WARNING: These are really only supported well on NEON. Consider restructuring your data before
1116 // resorting to these methods.
1117 SIT void strided_load2(const T* v, Vec<1,T>& a, Vec<1,T>& b) {
1118     a.val = v[0];
1119     b.val = v[1];
1120 }
1121 SINT void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {
1122     strided_load2(v, a.lo, b.lo);
1123     strided_load2(v + 2*(N/2), a.hi, b.hi);
1124 }
1125 #if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
1126 #define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \
1127 SI void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) { \
1128     auto mat = VLD(v); \
1129     a = sk_bit_cast<Vec<N,T>>(mat.val[0]); \
1130     b = sk_bit_cast<Vec<N,T>>(mat.val[1]); \
1131 }
1132 IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
1133 IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
1134 IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
1135 IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
1136 IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
1137 IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
1138 IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
1139 IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
1140 IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
1141 IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
1142 IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
1143 IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
1144 IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
1145 IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
1146 #undef IMPL_LOAD2_TRANSPOSED
1147 #endif
1148 
1149 // Define commonly used aliases
1150 using float2  = Vec< 2, float>;
1151 using float4  = Vec< 4, float>;
1152 using float8  = Vec< 8, float>;
1153 
1154 using double2 = Vec< 2, double>;
1155 using double4 = Vec< 4, double>;
1156 using double8 = Vec< 8, double>;
1157 
1158 using byte2   = Vec< 2, uint8_t>;
1159 using byte4   = Vec< 4, uint8_t>;
1160 using byte8   = Vec< 8, uint8_t>;
1161 using byte16  = Vec<16, uint8_t>;
1162 
1163 using int2    = Vec< 2, int32_t>;
1164 using int4    = Vec< 4, int32_t>;
1165 using int8    = Vec< 8, int32_t>;
1166 
1167 using ushort2 = Vec< 2, uint16_t>;
1168 using ushort4 = Vec< 4, uint16_t>;
1169 using ushort8 = Vec< 8, uint16_t>;
1170 
1171 using uint2   = Vec< 2, uint32_t>;
1172 using uint4   = Vec< 4, uint32_t>;
1173 using uint8   = Vec< 8, uint32_t>;
1174 
1175 using long2   = Vec< 2, int64_t>;
1176 using long4   = Vec< 4, int64_t>;
1177 using long8   = Vec< 8, int64_t>;
1178 
1179 // Use with from_half and to_half to convert to/from the floatN types above, and use these for storage.
1180 using half2   = Vec< 2, uint16_t>;
1181 using half4   = Vec< 4, uint16_t>;
1182 using half8   = Vec< 8, uint16_t>;
1183 
1184 }  // namespace skvx
1185 
1186 #undef SINTU
1187 #undef SINT
1188 #undef SIN
1189 #undef SIT
1190 #undef SI
1191 #undef SKVX_ALWAYS_INLINE
1192 #undef SKVX_USE_SIMD
1193 
1194 #endif//SKVX_DEFINED
1195