/**************************************************************************
 *
 * Copyright 2008-2021 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * SSE intrinsics portability header.
 *
 * Although the SSE intrinsics are supported by all modern x86 and x86-64
 * compilers, some intrinsics are missing in some implementations
 * (especially older MSVC versions). This header abstracts that away.
 */

#ifndef U_SSE_H_
#define U_SSE_H_

#include "util/detect.h"
#include "util/compiler.h"
#include "util/u_debug.h"

#if DETECT_ARCH_SSE

#include <emmintrin.h>


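/*
 * Convenience union for accessing the individual bytes, 16-bit words or
 * 32-bit dwords of an __m128i value.
 */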
union m128i {
   __m128i m;
   uint8_t ub[16];
   uint16_t us[8];
   uint32_t ui[4];
};

/*
 * Provide an SSE implementation of _mm_mul_epi32() in terms of
 * _mm_mul_epu32().
 *
 * Basically, albeit surprising at first (and second, and third...) look,
 * if a * b is done unsigned instead of signed, the signed result can be
 * recovered by just subtracting b from the high bits of the result if a is
 * negative (and likewise subtracting a if b is negative). Modular
 * arithmetic at its best!
 *
 * So for int32 a,b in crude pseudo-code ("*" here denoting a widening mul):
 * fixupb = (signmask(b) & a) << 32ULL
 * fixupa = (signmask(a) & b) << 32ULL
 * a * b = (unsigned)a * (unsigned)b - fixupb - fixupa
 *       = (unsigned)a * (unsigned)b - (fixupb + fixupa)
 *
 * This does both lo (dwords 0/2) and hi parts (1/3) at the same time due
 * to some optimization potential.
 */
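/*
 * Illustrative worked example of the fixup above: a = -1, b = 2.
 * (unsigned)a * (unsigned)b = 0xffffffff * 2 = 0x1fffffffe.  a is negative,
 * so fixupa = (uint64_t)b << 32 = 0x200000000, while b is non-negative, so
 * fixupb = 0.  0x1fffffffe - 0x200000000 wraps to 0xfffffffffffffffe = -2,
 * the correct signed product.
 */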
static inline __m128i
mm_mullohi_epi32(const __m128i a, const __m128i b, __m128i *res13)
{
   __m128i a13, b13, mul02, mul13;
   __m128i anegmask, bnegmask, fixup, fixup02, fixup13;
   a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(2,3,0,1));
   b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(2,3,0,1));
   anegmask = _mm_srai_epi32(a, 31);
   bnegmask = _mm_srai_epi32(b, 31);
   fixup = _mm_add_epi32(_mm_and_si128(anegmask, b),
                         _mm_and_si128(bnegmask, a));
   mul02 = _mm_mul_epu32(a, b);
   mul13 = _mm_mul_epu32(a13, b13);
   fixup02 = _mm_slli_epi64(fixup, 32);
   fixup13 = _mm_and_si128(fixup, _mm_set_epi32(-1,0,-1,0));
   *res13 = _mm_sub_epi64(mul13, fixup13);
   return _mm_sub_epi64(mul02, fixup02);
}


/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
 * _mm_mul_epu32().
 *
 * This always works regardless of the signs of the operands, since
 * the high bits (which would be different) aren't used.
 *
 * This seems close enough to the speed of SSE4 and the real
 * _mm_mullo_epi32() intrinsic as to not justify adding an sse4
 * dependency at this point.
 */
static inline __m128i mm_mullo_epi32(const __m128i a, const __m128i b)
{
   __m128i a4   = _mm_srli_epi64(a, 32);  /* shift by one dword */
   __m128i b4   = _mm_srli_epi64(b, 32);  /* shift by one dword */
   __m128i ba   = _mm_mul_epu32(b, a);   /* multiply dwords 0, 2 */
   __m128i b4a4 = _mm_mul_epu32(b4, a4); /* multiply dwords 1, 3 */

   /* Interleave the results, either with shuffles or (slightly
    * faster) direct bit operations:
    * XXX: might only be true for some cpus (in particular 65nm
    * Core 2). On most cpus (including that Core 2, but not Nehalem...)
    * using _mm_shuffle_ps/_mm_shuffle_epi32 might also be faster
    * than using the 3 instructions below. But logic should be fine
    * as well, we can't have an optimal solution for all cpus (if anything,
    * we should just use _mm_mullo_epi32() if sse41 is available...).
    */
#if 0
   __m128i ba8             = _mm_shuffle_epi32(ba, 8);
   __m128i b4a48           = _mm_shuffle_epi32(b4a4, 8);
   __m128i result          = _mm_unpacklo_epi32(ba8, b4a48);
#else
   __m128i mask            = _mm_setr_epi32(~0,0,~0,0);
   __m128i ba_mask         = _mm_and_si128(ba, mask);
   __m128i b4a4_mask_shift = _mm_slli_epi64(b4a4, 32);
   __m128i result          = _mm_or_si128(ba_mask, b4a4_mask_shift);
#endif

   return result;
}
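
/* A scalar per-lane sketch of what the function above computes, for
 * reference only (the name below is illustrative, not part of the header):
 * the low 32 bits of the product are identical for signed and unsigned
 * operands, which is why the unsigned widening multiply suffices.
 */
#if 0
static inline void
mm_mullo_epi32_ref(uint32_t dst[4], const uint32_t a[4], const uint32_t b[4])
{
   for (int i = 0; i < 4; i++)
      dst[i] = a[i] * b[i];   /* product modulo 2^32 */
}
#endif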


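/*
 * Transpose a 4x4 matrix of 32-bit elements: *a, *b, *c, *d are the input
 * rows, *o, *p, *q, *r receive the columns.
 */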
static inline void
transpose4_epi32(const __m128i * restrict a,
                 const __m128i * restrict b,
                 const __m128i * restrict c,
                 const __m128i * restrict d,
                 __m128i * restrict o,
                 __m128i * restrict p,
                 __m128i * restrict q,
                 __m128i * restrict r)
{
   __m128i t0 = _mm_unpacklo_epi32(*a, *b);
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = _mm_unpackhi_epi32(*a, *b);
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


/*
 * Same as above, except the first two values are already interleaved
 * (i.e. contain 64bit values).
 */
static inline void
transpose2_64_2_32(const __m128i * restrict a01,
                   const __m128i * restrict a23,
                   const __m128i * restrict c,
                   const __m128i * restrict d,
                   __m128i * restrict o,
                   __m128i * restrict p,
                   __m128i * restrict q,
                   __m128i * restrict r)
{
   __m128i t0 = *a01;
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = *a23;
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}


#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))


/*
 * Implements (1-w)*a + w*b = a - wa + wb = w(b-a) + a, computed as
 * ((b-a)*w >> 8) + a.
 * The math behind negative (b-a) results (logical shift/mask) is tricky.
 *
 * w -- weight values
 * a -- src0 values
 * b -- src1 values
 */
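/*
 * Illustrative worked example of the negative case: w = 128, a = 200,
 * b = 100.  (b - a) * w = -12800, which is 0xce00 as an unsigned 16-bit
 * value; the logical >> 8 gives 0xce = 206, and the 8-bit add 206 + 200
 * wraps to 150 = a + ((b - a) * w >> 8) = 200 - 50, the expected result.
 */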
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi16(__m128i w, __m128i a, __m128i b)
{
   __m128i res;

   res = _mm_sub_epi16(b, a);
   res = _mm_mullo_epi16(res, w);
   res = _mm_srli_epi16(res, 8);
   /* use add_epi8 instead of add_epi16 so no need to mask off upper bits */
   res = _mm_add_epi8(res, a);

   return res;
}


/* Apply premultiplied-alpha blending on two pixels simultaneously.
 * All parameters are packed as 8.8 fixed point values in __m128i SSE
 * registers, with the upper 8 bits all zero.
 *
 * a -- src alpha values
 * d -- dst color values
 * s -- src color values
 */
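/*
 * Per channel this computes s + (d - ((d * a) >> 8)), i.e. approximately
 * s + d * (1 - a/256), the one/inv_src_alpha blend equation.
 */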
static inline __m128i
util_sse2_premul_blend_epi16( __m128i a, __m128i d, __m128i s)
{
   __m128i da, d_sub_da, tmp;
   tmp      = _mm_mullo_epi16(d, a);
   da       = _mm_srli_epi16(tmp, 8);
   d_sub_da = _mm_sub_epi16(d, da);

   return  _mm_add_epi16(s, d_sub_da);
}


/* Apply premultiplied-alpha blending on four pixels in packed BGRA
 * format (one/inv_src_alpha blend mode).
 *
 * src    -- four pixels (bgra8 format)
 * dst    -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_4(const __m128i src,
                         const __m128i dst)
{

   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_premul_blend_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_premul_blend_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}
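
/* Usage sketch (illustrative only, not part of this header): blend a run
 * of n premultiplied bgra8 pixels from src over dst, four at a time.  The
 * function and variable names below are hypothetical.
 */
#if 0
static void
blend_premul_row(uint32_t *dst, const uint32_t *src, unsigned n)
{
   for (unsigned i = 0; i + 4 <= n; i += 4) {
      __m128i s = _mm_loadu_si128((const __m128i *)(src + i));
      __m128i d = _mm_loadu_si128((const __m128i *)(dst + i));
      _mm_storeu_si128((__m128i *)(dst + i),
                       util_sse2_blend_premul_4(s, d));
   }
}
#endif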


/* Apply src-alpha blending on four pixels in packed BGRA
 * format (srcalpha/inv_src_alpha blend mode).
 *
 * src    -- four pixels (bgra8 format)
 * dst    -- four destination pixels (bgra8)
 * return -- blended pixels (bgra8)
 */
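/*
 * Per channel this computes d + (((s - d) * a) >> 8), i.e. a lerp from dst
 * to src by the src alpha.
 */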
static ALWAYS_INLINE __m128i
util_sse2_blend_srcalpha_4(const __m128i src,
                           const __m128i dst)
{

   __m128i al, ah, dl, dh, sl, sh, rl, rh;
   __m128i zero = _mm_setzero_si128();

   /* Blend first two pixels:
    */
   sl = _mm_unpacklo_epi8(src, zero);
   dl = _mm_unpacklo_epi8(dst, zero);

   al = _mm_shufflehi_epi16(sl, 0xff);
   al = _mm_shufflelo_epi16(al, 0xff);

   rl = util_sse2_lerp_epi16(al, dl, sl);

   /* Blend second two pixels:
    */
   sh = _mm_unpackhi_epi8(src, zero);
   dh = _mm_unpackhi_epi8(dst, zero);

   ah = _mm_shufflehi_epi16(sh, 0xff);
   ah = _mm_shufflelo_epi16(ah, 0xff);

   rh = util_sse2_lerp_epi16(ah, dh, sh);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Premultiplies src with a constant alpha, then does a one/inv_src_alpha
 * blend.
 *
 * src 16xi8 (normalized)
 * dst 16xi8 (normalized)
 * cst_alpha (constant alpha (u8 value))
 */
static ALWAYS_INLINE __m128i
util_sse2_blend_premul_src_4(const __m128i src,
                             const __m128i dst,
                             const unsigned cst_alpha)
{

   __m128i srca, d, s, rl, rh;
   __m128i zero = _mm_setzero_si128();
   __m128i cst_alpha_vec = _mm_set1_epi16(cst_alpha);

   /* Blend first two pixels:
    */
   s = _mm_unpacklo_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpacklo_epi8(dst, zero);
   rl = util_sse2_premul_blend_epi16(srca, d, s);

   /* Blend second two pixels:
    */
   s = _mm_unpackhi_epi8(src, zero);
   s = _mm_mullo_epi16(s, cst_alpha_vec);
   /* the shift will cause some precision loss */
   s = _mm_srli_epi16(s, 8);

   srca = _mm_shufflehi_epi16(s, 0xff);
   srca = _mm_shufflelo_epi16(srca, 0xff);

   d = _mm_unpackhi_epi8(dst, zero);
   rh = util_sse2_premul_blend_epi16(srca, d, s);

   /* Pack the results down to four bgra8 pixels:
    */
   return _mm_packus_epi16(rl, rh);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight_lo and weight_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd want to pass these by value, but an MSVC limitation forces us to pass
 * them as pointers since it complains if more than 3 __m128 arguments are
 * passed by value.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed88(__m128i src0, __m128i src1,
                            const __m128i * restrict weight_lo,
                            const __m128i * restrict weight_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i dst_lo;
   __m128i dst_hi;

   dst_lo = util_sse2_lerp_epi16(*weight_lo, src0_lo, src1_lo);
   dst_hi = util_sse2_lerp_epi16(*weight_hi, src0_hi, src1_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1 are 16 x i8 vectors, with [0..255] normalized values.
 *
 * weight should be a 16 x i8 vector, in 0.8 fixed point values.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_epi8_fixed08(__m128i src0, __m128i src1,
                            __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, and weight are 16 x i8 vectors, with [0..255] normalized
 * values.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_unorm8(__m128i src0, __m128i src1,
                      __m128i weight)
{
   const __m128i zero = _mm_setzero_si128();
   __m128i weight_lo = _mm_unpacklo_epi8(weight, zero);
   __m128i weight_hi = _mm_unpackhi_epi8(weight, zero);

#if 0
   /*
    * Rescale from [0..255] to [0..256].
    */
   weight_lo = _mm_add_epi16(weight_lo, _mm_srli_epi16(weight_lo, 7));
   weight_hi = _mm_add_epi16(weight_hi, _mm_srli_epi16(weight_hi, 7));
#endif

   return util_sse2_lerp_epi8_fixed88(src0, src1,
                                      &weight_lo, &weight_hi);
}


/**
 * Linear interpolation with SSE2.
 *
 * dst, src0, src1, src2, src3 are 16 x i8 vectors, with [0..255] normalized
 * values.
 *
 * ws_lo, ws_hi, wt_lo, wt_hi should be 8 x i16 vectors, in 8.8 fixed point
 * format, for the low and high components.
 * We'd want to pass these by value, but an MSVC limitation forces us to pass
 * them as pointers since it complains if more than 3 __m128 arguments are
 * passed by value.
 *
 * This uses ws_lo, ws_hi to interpolate between src0 and src1, as well as to
 * interpolate between src2 and src3, then uses wt_lo and wt_hi to interpolate
 * between the resulting vectors.
 */
static ALWAYS_INLINE __m128i
util_sse2_lerp_2d_epi8_fixed88(__m128i src0, __m128i src1,
                               const __m128i * restrict src2,
                               const __m128i * restrict src3,
                               const __m128i * restrict ws_lo,
                               const __m128i * restrict ws_hi,
                               const __m128i * restrict wt_lo,
                               const __m128i * restrict wt_hi)
{
   const __m128i zero = _mm_setzero_si128();

   __m128i src0_lo = _mm_unpacklo_epi8(src0, zero);
   __m128i src0_hi = _mm_unpackhi_epi8(src0, zero);

   __m128i src1_lo = _mm_unpacklo_epi8(src1, zero);
   __m128i src1_hi = _mm_unpackhi_epi8(src1, zero);

   __m128i src2_lo = _mm_unpacklo_epi8(*src2, zero);
   __m128i src2_hi = _mm_unpackhi_epi8(*src2, zero);

   __m128i src3_lo = _mm_unpacklo_epi8(*src3, zero);
   __m128i src3_hi = _mm_unpackhi_epi8(*src3, zero);

   __m128i dst_lo, dst01_lo, dst23_lo;
   __m128i dst_hi, dst01_hi, dst23_hi;

   dst01_lo = util_sse2_lerp_epi16(*ws_lo, src0_lo, src1_lo);
   dst01_hi = util_sse2_lerp_epi16(*ws_hi, src0_hi, src1_hi);
   dst23_lo = util_sse2_lerp_epi16(*ws_lo, src2_lo, src3_lo);
   dst23_hi = util_sse2_lerp_epi16(*ws_hi, src2_hi, src3_hi);

   dst_lo = util_sse2_lerp_epi16(*wt_lo, dst01_lo, dst23_lo);
   dst_hi = util_sse2_lerp_epi16(*wt_hi, dst01_hi, dst23_hi);

   return _mm_packus_epi16(dst_lo, dst_hi);
}

/**
 * Stretch a row of pixels using a linear filter.
 *
 * Uses Bresenham's line algorithm using 16.16 fixed point representation for
 * the error term.
 *
 * @param dst_width destination width in pixels
 * @param src_x    start x0 in 16.16 fixed point format
 * @param src_xstep step in 16.16 fixed point format
 *
 * @return final src_x value (i.e., src_x + dst_width*src_xstep)
 */
static ALWAYS_INLINE int32_t
util_sse2_stretch_row_8unorm(__m128i * restrict dst,
                             int32_t dst_width,
                             const uint32_t * restrict src,
                             int32_t src_x,
                             int32_t src_xstep)
{
   int16_t error0, error1, error2, error3;
   __m128i error_lo, error_hi, error_step;

   assert(dst_width >= 0);
   assert(dst_width % 4 == 0);

   error0 = src_x;
   error1 = error0 + src_xstep;
   error2 = error1 + src_xstep;
   error3 = error2 + src_xstep;

   error_lo   = _mm_setr_epi16(error0, error0, error0, error0,
                               error1, error1, error1, error1);
   error_hi   = _mm_setr_epi16(error2, error2, error2, error2,
                               error3, error3, error3, error3);
   error_step = _mm_set1_epi16(src_xstep << 2);

   dst_width >>= 2;
   while (dst_width) {
      uint16_t src_x0;
      uint16_t src_x1;
      uint16_t src_x2;
      uint16_t src_x3;
      __m128i src0, src1;
      __m128i weight_lo, weight_hi;

      /*
       * It is faster to re-compute the coordinates in the scalar integer unit
       * here than to fetch the values from the SIMD integer unit.
       */

      src_x0 = src_x >> 16;
      src_x += src_xstep;
      src_x1 = src_x >> 16;
      src_x += src_xstep;
      src_x2 = src_x >> 16;
      src_x += src_xstep;
      src_x3 = src_x >> 16;
      src_x += src_xstep;

      /*
       * Fetch pairs of pixels 64bit at a time, and then swizzle them in place.
       */

      {
         __m128i src_00_10 = _mm_loadl_epi64((const __m128i *)&src[src_x0]);
         __m128i src_01_11 = _mm_loadl_epi64((const __m128i *)&src[src_x1]);
         __m128i src_02_12 = _mm_loadl_epi64((const __m128i *)&src[src_x2]);
         __m128i src_03_13 = _mm_loadl_epi64((const __m128i *)&src[src_x3]);

         __m128i src_00_01_10_11 = _mm_unpacklo_epi32(src_00_10, src_01_11);
         __m128i src_02_03_12_13 = _mm_unpacklo_epi32(src_02_12, src_03_13);

         src0 = _mm_unpacklo_epi64(src_00_01_10_11, src_02_03_12_13);
         src1 = _mm_unpackhi_epi64(src_00_01_10_11, src_02_03_12_13);
      }

      weight_lo = _mm_srli_epi16(error_lo, 8);
      weight_hi = _mm_srli_epi16(error_hi, 8);

      *dst = util_sse2_lerp_epi8_fixed88(src0, src1,
                                         &weight_lo, &weight_hi);

      error_lo = _mm_add_epi16(error_lo, error_step);
      error_hi = _mm_add_epi16(error_hi, error_step);

      ++dst;
      --dst_width;
   }

   return src_x;
}
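
/* Usage sketch (illustrative only, not part of this header): scale a row of
 * src_width bgra8 pixels to dst_width pixels.  dst_width must be a multiple
 * of 4 and dst must be 16-byte aligned; since the loop above fetches pixel
 * pairs src[x] and src[x + 1], the step below is chosen so that x + 1 stays
 * within the source row (assumes src_width >= 2).  The function name and
 * the step computation are hypothetical, not mandated by this header.
 */
#if 0
static void
stretch_row_example(__m128i *dst, int32_t dst_width,
                    const uint32_t *src, int32_t src_width)
{
   int32_t src_xstep =
      (int32_t)((((int64_t)src_width - 1) << 16) / dst_width);
   util_sse2_stretch_row_8unorm(dst, dst_width, src, 0, src_xstep);
}
#endif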



#endif /* DETECT_ARCH_SSE */

#endif /* U_SSE_H_ */