// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// YUV->RGB conversion functions
//
// Author: Skal ([email protected])

#include "src/dsp/yuv.h"

#if defined(WEBP_USE_SSE2)

#include <stdlib.h>
#include <emmintrin.h>

#include "src/dsp/common_sse2.h"
#include "src/utils/utils.h"

//-----------------------------------------------------------------------------
// Convert spans of 32 pixels to various RGB formats for the fancy upsampler.

// These constants are 14b fixed-point versions of the ITU-R BT.601 constants.
// R = (19077 * y             + 26149 * v - 14234) >> 6
// G = (19077 * y -  6419 * u - 13320 * v +  8708) >> 6
// B = (19077 * y + 33050 * u             - 17685) >> 6
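// Note: the Y/U/V inputs below are pre-shifted into the upper byte of each 16b
// lane (see Load_HI_16_SSE2), so _mm_mulhi_epu16(x << 8, k) computes
// (x * k) >> 8; the final '>> 6' then drops the remaining fixed-point bits.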
static void ConvertYUV444ToRGB_SSE2(const __m128i* const Y0,
                                    const __m128i* const U0,
                                    const __m128i* const V0,
                                    __m128i* const R,
                                    __m128i* const G,
                                    __m128i* const B) {
  const __m128i k19077 = _mm_set1_epi16(19077);
  const __m128i k26149 = _mm_set1_epi16(26149);
  const __m128i k14234 = _mm_set1_epi16(14234);
  // 33050 doesn't fit in a signed short: only use this with unsigned arithmetic
  const __m128i k33050 = _mm_set1_epi16((short)33050);
  const __m128i k17685 = _mm_set1_epi16(17685);
  const __m128i k6419  = _mm_set1_epi16(6419);
  const __m128i k13320 = _mm_set1_epi16(13320);
  const __m128i k8708  = _mm_set1_epi16(8708);

  const __m128i Y1 = _mm_mulhi_epu16(*Y0, k19077);

  const __m128i R0 = _mm_mulhi_epu16(*V0, k26149);
  const __m128i R1 = _mm_sub_epi16(Y1, k14234);
  const __m128i R2 = _mm_add_epi16(R1, R0);

  const __m128i G0 = _mm_mulhi_epu16(*U0, k6419);
  const __m128i G1 = _mm_mulhi_epu16(*V0, k13320);
  const __m128i G2 = _mm_add_epi16(Y1, k8708);
  const __m128i G3 = _mm_add_epi16(G0, G1);
  const __m128i G4 = _mm_sub_epi16(G2, G3);

  // be careful with the saturated *unsigned* arithmetic here!
  const __m128i B0 = _mm_mulhi_epu16(*U0, k33050);
  const __m128i B1 = _mm_adds_epu16(B0, Y1);
  const __m128i B2 = _mm_subs_epu16(B1, k17685);

  // use logical shift for B2, which can be larger than 32767
  *R = _mm_srai_epi16(R2, 6);   // range: [-14234, 30815]
  *G = _mm_srai_epi16(G4, 6);   // range: [-10953, 27710]
  *B = _mm_srli_epi16(B2, 6);   // range: [0, 34238]
}

// Load the bytes into the *upper* part of 16b words. That's "<< 8", basically.
static WEBP_INLINE __m128i Load_HI_16_SSE2(const uint8_t* src) {
  const __m128i zero = _mm_setzero_si128();
  return _mm_unpacklo_epi8(zero, _mm_loadl_epi64((const __m128i*)src));
}

// Load and replicate the U/V samples
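// (each of the four chroma bytes is duplicated so it covers two horizontally
// adjacent luma pixels, as per the 4:2:0 layout; like Load_HI_16_SSE2, the
// samples end up in the upper byte of each 16b lane)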
static WEBP_INLINE __m128i Load_UV_HI_8_SSE2(const uint8_t* src) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i tmp0 = _mm_cvtsi32_si128(WebPMemToInt32(src));
  const __m128i tmp1 = _mm_unpacklo_epi8(zero, tmp0);
  return _mm_unpacklo_epi16(tmp1, tmp1);   // replicate samples
}

// Convert 8 samples of YUV444 to R/G/B
static void YUV444ToRGB_SSE2(const uint8_t* const y,
                             const uint8_t* const u,
                             const uint8_t* const v,
                             __m128i* const R, __m128i* const G,
                             __m128i* const B) {
  const __m128i Y0 = Load_HI_16_SSE2(y), U0 = Load_HI_16_SSE2(u),
                V0 = Load_HI_16_SSE2(v);
  ConvertYUV444ToRGB_SSE2(&Y0, &U0, &V0, R, G, B);
}

// Convert 8 samples of YUV420 to R/G/B
static void YUV420ToRGB_SSE2(const uint8_t* const y,
                             const uint8_t* const u,
                             const uint8_t* const v,
                             __m128i* const R, __m128i* const G,
                             __m128i* const B) {
  const __m128i Y0 = Load_HI_16_SSE2(y), U0 = Load_UV_HI_8_SSE2(u),
                V0 = Load_UV_HI_8_SSE2(v);
  ConvertYUV444ToRGB_SSE2(&Y0, &U0, &V0, R, G, B);
}

// Pack R/G/B/A results into 32b output.
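// (R/G/B/A are packed to 8b, then two interleaving passes produce the eight
// r,g,b,a quadruplets, i.e. 32 bytes of RGBA output for 8 pixels)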
static WEBP_INLINE void PackAndStore4_SSE2(const __m128i* const R,
                                           const __m128i* const G,
                                           const __m128i* const B,
                                           const __m128i* const A,
                                           uint8_t* const dst) {
  const __m128i rb = _mm_packus_epi16(*R, *B);
  const __m128i ga = _mm_packus_epi16(*G, *A);
  const __m128i rg = _mm_unpacklo_epi8(rb, ga);
  const __m128i ba = _mm_unpackhi_epi8(rb, ga);
  const __m128i RGBA_lo = _mm_unpacklo_epi16(rg, ba);
  const __m128i RGBA_hi = _mm_unpackhi_epi16(rg, ba);
  _mm_storeu_si128((__m128i*)(dst +  0), RGBA_lo);
  _mm_storeu_si128((__m128i*)(dst + 16), RGBA_hi);
}

// Pack R/G/B/A results into 16b output.
static WEBP_INLINE void PackAndStore4444_SSE2(const __m128i* const R,
                                              const __m128i* const G,
                                              const __m128i* const B,
                                              const __m128i* const A,
                                              uint8_t* const dst) {
#if (WEBP_SWAP_16BIT_CSP == 0)
  const __m128i rg0 = _mm_packus_epi16(*R, *G);
  const __m128i ba0 = _mm_packus_epi16(*B, *A);
#else
  const __m128i rg0 = _mm_packus_epi16(*B, *A);
  const __m128i ba0 = _mm_packus_epi16(*R, *G);
#endif
  const __m128i mask_0xf0 = _mm_set1_epi8((char)0xf0);
  const __m128i rb1 = _mm_unpacklo_epi8(rg0, ba0);  // rbrbrbrbrb...
  const __m128i ga1 = _mm_unpackhi_epi8(rg0, ba0);  // gagagagaga...
  const __m128i rb2 = _mm_and_si128(rb1, mask_0xf0);
  const __m128i ga2 = _mm_srli_epi16(_mm_and_si128(ga1, mask_0xf0), 4);
  const __m128i rgba4444 = _mm_or_si128(rb2, ga2);
  _mm_storeu_si128((__m128i*)dst, rgba4444);
}

// Pack R/G/B results into 16b output.
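// (each pixel becomes two bytes, RRRRRGGG then GGGBBBBB, or the reverse byte
// order when WEBP_SWAP_16BIT_CSP is set)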
static WEBP_INLINE void PackAndStore565_SSE2(const __m128i* const R,
                                             const __m128i* const G,
                                             const __m128i* const B,
                                             uint8_t* const dst) {
  const __m128i r0 = _mm_packus_epi16(*R, *R);
  const __m128i g0 = _mm_packus_epi16(*G, *G);
  const __m128i b0 = _mm_packus_epi16(*B, *B);
  const __m128i r1 = _mm_and_si128(r0, _mm_set1_epi8((char)0xf8));
  const __m128i b1 = _mm_and_si128(_mm_srli_epi16(b0, 3), _mm_set1_epi8(0x1f));
  const __m128i g1 =
      _mm_srli_epi16(_mm_and_si128(g0, _mm_set1_epi8((char)0xe0)), 5);
  const __m128i g2 = _mm_slli_epi16(_mm_and_si128(g0, _mm_set1_epi8(0x1c)), 3);
  const __m128i rg = _mm_or_si128(r1, g1);
  const __m128i gb = _mm_or_si128(g2, b1);
#if (WEBP_SWAP_16BIT_CSP == 0)
  const __m128i rgb565 = _mm_unpacklo_epi8(rg, gb);
#else
  const __m128i rgb565 = _mm_unpacklo_epi8(gb, rg);
#endif
  _mm_storeu_si128((__m128i*)dst, rgb565);
}

// Pack the planar buffers
// rrrr... rrrr... gggg... gggg... bbbb... bbbb....
// triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ...
static WEBP_INLINE void PlanarTo24b_SSE2(__m128i* const in0, __m128i* const in1,
                                         __m128i* const in2, __m128i* const in3,
                                         __m128i* const in4, __m128i* const in5,
                                         uint8_t* const rgb) {
  // The input is 6 registers of sixteen 8b values, but for the sake of
  // explanation, let's take 6 registers of four 8b values.
  // To pack, we repeatedly take every other 8b value and move it around as
  // follows:
  // Input:
  //   r0r1r2r3 | r4r5r6r7 | g0g1g2g3 | g4g5g6g7 | b0b1b2b3 | b4b5b6b7
  // Split the 6 registers into two sets of 3 registers: the first set holds
  // the even bytes, the second the odd ones:
  //   r0r2r4r6 | g0g2g4g6 | b0b2b4b6 | r1r3r5r7 | g1g3g5g7 | b1b3b5b7
  // Repeat the same permutation twice more:
  //   r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7
  //   r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7
  VP8PlanarTo24b_SSE2(in0, in1, in2, in3, in4, in5);

  _mm_storeu_si128((__m128i*)(rgb +  0), *in0);
  _mm_storeu_si128((__m128i*)(rgb + 16), *in1);
  _mm_storeu_si128((__m128i*)(rgb + 32), *in2);
  _mm_storeu_si128((__m128i*)(rgb + 48), *in3);
  _mm_storeu_si128((__m128i*)(rgb + 64), *in4);
  _mm_storeu_si128((__m128i*)(rgb + 80), *in5);
}

void VP8YuvToRgba32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                         uint8_t* dst) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n < 32; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B);
    PackAndStore4_SSE2(&R, &G, &B, &kAlpha, dst);
  }
}

void VP8YuvToBgra32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                         uint8_t* dst) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n < 32; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B);
    PackAndStore4_SSE2(&B, &G, &R, &kAlpha, dst);
  }
}

void VP8YuvToArgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                         uint8_t* dst) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n < 32; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B);
    PackAndStore4_SSE2(&kAlpha, &R, &G, &B, dst);
  }
}

void VP8YuvToRgba444432_SSE2(const uint8_t* y, const uint8_t* u,
                             const uint8_t* v, uint8_t* dst) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n < 32; n += 8, dst += 16) {
    __m128i R, G, B;
    YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B);
    PackAndStore4444_SSE2(&R, &G, &B, &kAlpha, dst);
  }
}

void VP8YuvToRgb56532_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                           uint8_t* dst) {
  int n;
  for (n = 0; n < 32; n += 8, dst += 16) {
    __m128i R, G, B;
    YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B);
    PackAndStore565_SSE2(&R, &G, &B, dst);
  }
}

void VP8YuvToRgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                        uint8_t* dst) {
  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
  __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5;

  YUV444ToRGB_SSE2(y + 0, u + 0, v + 0, &R0, &G0, &B0);
  YUV444ToRGB_SSE2(y + 8, u + 8, v + 8, &R1, &G1, &B1);
  YUV444ToRGB_SSE2(y + 16, u + 16, v + 16, &R2, &G2, &B2);
  YUV444ToRGB_SSE2(y + 24, u + 24, v + 24, &R3, &G3, &B3);

  // Cast to 8b and store as RRRRGGGGBBBB.
  rgb0 = _mm_packus_epi16(R0, R1);
  rgb1 = _mm_packus_epi16(R2, R3);
  rgb2 = _mm_packus_epi16(G0, G1);
  rgb3 = _mm_packus_epi16(G2, G3);
  rgb4 = _mm_packus_epi16(B0, B1);
  rgb5 = _mm_packus_epi16(B2, B3);

  // Pack as RGBRGBRGBRGB.
  PlanarTo24b_SSE2(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst);
}

void VP8YuvToBgr32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                        uint8_t* dst) {
  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
  __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5;

  YUV444ToRGB_SSE2(y +  0, u +  0, v +  0, &R0, &G0, &B0);
  YUV444ToRGB_SSE2(y +  8, u +  8, v +  8, &R1, &G1, &B1);
  YUV444ToRGB_SSE2(y + 16, u + 16, v + 16, &R2, &G2, &B2);
  YUV444ToRGB_SSE2(y + 24, u + 24, v + 24, &R3, &G3, &B3);

  // Cast to 8b and store as BBBBGGGGRRRR.
  bgr0 = _mm_packus_epi16(B0, B1);
  bgr1 = _mm_packus_epi16(B2, B3);
  bgr2 = _mm_packus_epi16(G0, G1);
  bgr3 = _mm_packus_epi16(G2, G3);
  bgr4 = _mm_packus_epi16(R0, R1);
  bgr5 = _mm_packus_epi16(R2, R3);

  // Pack as BGRBGRBGRBGR.
  PlanarTo24b_SSE2(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst);
}

//-----------------------------------------------------------------------------
// Arbitrary-length row conversion functions

static void YuvToRgbaRow_SSE2(const uint8_t* y,
                              const uint8_t* u, const uint8_t* v,
                              uint8_t* dst, int len) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n + 8 <= len; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV420ToRGB_SSE2(y, u, v, &R, &G, &B);
    PackAndStore4_SSE2(&R, &G, &B, &kAlpha, dst);
    y += 8;
    u += 4;
    v += 4;
  }
  for (; n < len; ++n) {   // Finish off
    VP8YuvToRgba(y[0], u[0], v[0], dst);
    dst += 4;
    y += 1;
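    // u/v advance every second pixel (4:2:0 horizontal subsampling).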
    u += (n & 1);
    v += (n & 1);
  }
}

static void YuvToBgraRow_SSE2(const uint8_t* y,
                              const uint8_t* u, const uint8_t* v,
                              uint8_t* dst, int len) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n + 8 <= len; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV420ToRGB_SSE2(y, u, v, &R, &G, &B);
    PackAndStore4_SSE2(&B, &G, &R, &kAlpha, dst);
    y += 8;
    u += 4;
    v += 4;
  }
  for (; n < len; ++n) {   // Finish off
    VP8YuvToBgra(y[0], u[0], v[0], dst);
    dst += 4;
    y += 1;
    u += (n & 1);
    v += (n & 1);
  }
}

static void YuvToArgbRow_SSE2(const uint8_t* y,
                              const uint8_t* u, const uint8_t* v,
                              uint8_t* dst, int len) {
  const __m128i kAlpha = _mm_set1_epi16(255);
  int n;
  for (n = 0; n + 8 <= len; n += 8, dst += 32) {
    __m128i R, G, B;
    YUV420ToRGB_SSE2(y, u, v, &R, &G, &B);
    PackAndStore4_SSE2(&kAlpha, &R, &G, &B, dst);
    y += 8;
    u += 4;
    v += 4;
  }
  for (; n < len; ++n) {   // Finish off
    VP8YuvToArgb(y[0], u[0], v[0], dst);
    dst += 4;
    y += 1;
    u += (n & 1);
    v += (n & 1);
  }
}

static void YuvToRgbRow_SSE2(const uint8_t* y,
                             const uint8_t* u, const uint8_t* v,
                             uint8_t* dst, int len) {
  int n;
  for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) {
    __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
    __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5;

    YUV420ToRGB_SSE2(y +  0, u +  0, v +  0, &R0, &G0, &B0);
    YUV420ToRGB_SSE2(y +  8, u +  4, v +  4, &R1, &G1, &B1);
    YUV420ToRGB_SSE2(y + 16, u +  8, v +  8, &R2, &G2, &B2);
    YUV420ToRGB_SSE2(y + 24, u + 12, v + 12, &R3, &G3, &B3);

    // Cast to 8b and store as RRRRGGGGBBBB.
    rgb0 = _mm_packus_epi16(R0, R1);
    rgb1 = _mm_packus_epi16(R2, R3);
    rgb2 = _mm_packus_epi16(G0, G1);
    rgb3 = _mm_packus_epi16(G2, G3);
    rgb4 = _mm_packus_epi16(B0, B1);
    rgb5 = _mm_packus_epi16(B2, B3);

    // Pack as RGBRGBRGBRGB.
    PlanarTo24b_SSE2(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst);

    y += 32;
    u += 16;
    v += 16;
  }
  for (; n < len; ++n) {   // Finish off
    VP8YuvToRgb(y[0], u[0], v[0], dst);
    dst += 3;
    y += 1;
    u += (n & 1);
    v += (n & 1);
  }
}

static void YuvToBgrRow_SSE2(const uint8_t* y,
                             const uint8_t* u, const uint8_t* v,
                             uint8_t* dst, int len) {
  int n;
  for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) {
    __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3;
    __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5;

    YUV420ToRGB_SSE2(y +  0, u +  0, v +  0, &R0, &G0, &B0);
    YUV420ToRGB_SSE2(y +  8, u +  4, v +  4, &R1, &G1, &B1);
    YUV420ToRGB_SSE2(y + 16, u +  8, v +  8, &R2, &G2, &B2);
    YUV420ToRGB_SSE2(y + 24, u + 12, v + 12, &R3, &G3, &B3);

    // Cast to 8b and store as BBBBGGGGRRRR.
    bgr0 = _mm_packus_epi16(B0, B1);
    bgr1 = _mm_packus_epi16(B2, B3);
    bgr2 = _mm_packus_epi16(G0, G1);
    bgr3 = _mm_packus_epi16(G2, G3);
    bgr4 = _mm_packus_epi16(R0, R1);
    bgr5 = _mm_packus_epi16(R2, R3);

    // Pack as BGRBGRBGRBGR.
    PlanarTo24b_SSE2(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst);

    y += 32;
    u += 16;
    v += 16;
  }
  for (; n < len; ++n) {   // Finish off
    VP8YuvToBgr(y[0], u[0], v[0], dst);
    dst += 3;
    y += 1;
    u += (n & 1);
    v += (n & 1);
  }
}

//------------------------------------------------------------------------------
// Entry point

extern void WebPInitSamplersSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitSamplersSSE2(void) {
  WebPSamplers[MODE_RGB]  = YuvToRgbRow_SSE2;
  WebPSamplers[MODE_RGBA] = YuvToRgbaRow_SSE2;
  WebPSamplers[MODE_BGR]  = YuvToBgrRow_SSE2;
  WebPSamplers[MODE_BGRA] = YuvToBgraRow_SSE2;
  WebPSamplers[MODE_ARGB] = YuvToArgbRow_SSE2;
}

//------------------------------------------------------------------------------
// RGB24/32 -> YUV converters

// Load eight 16b-words from *src.
#define LOAD_16(src) _mm_loadu_si128((const __m128i*)(src))
// Store eight 16b-words into *dst.
#define STORE_16(V, dst) _mm_storeu_si128((__m128i*)(dst), (V))

// Function that inserts the bytes of the second half of the input buffer in
// between every two bytes of the first half.
static WEBP_INLINE void RGB24PackedToPlanarHelper_SSE2(
    const __m128i* const in /*in[6]*/, __m128i* const out /*out[6]*/) {
  out[0] = _mm_unpacklo_epi8(in[0], in[3]);
  out[1] = _mm_unpackhi_epi8(in[0], in[3]);
  out[2] = _mm_unpacklo_epi8(in[1], in[4]);
  out[3] = _mm_unpackhi_epi8(in[1], in[4]);
  out[4] = _mm_unpacklo_epi8(in[2], in[5]);
  out[5] = _mm_unpackhi_epi8(in[2], in[5]);
}

// Unpack the 8b input rgbrgbrgbrgb ... as contiguous registers:
// rrrr... rrrr... gggg... gggg... bbbb... bbbb....
// Similar to PlanarTo24bHelper(), but in reverse order.
static WEBP_INLINE void RGB24PackedToPlanar_SSE2(
    const uint8_t* const rgb, __m128i* const out /*out[6]*/) {
  __m128i tmp[6];
  tmp[0] = _mm_loadu_si128((const __m128i*)(rgb +  0));
  tmp[1] = _mm_loadu_si128((const __m128i*)(rgb + 16));
  tmp[2] = _mm_loadu_si128((const __m128i*)(rgb + 32));
  tmp[3] = _mm_loadu_si128((const __m128i*)(rgb + 48));
  tmp[4] = _mm_loadu_si128((const __m128i*)(rgb + 64));
  tmp[5] = _mm_loadu_si128((const __m128i*)(rgb + 80));

  RGB24PackedToPlanarHelper_SSE2(tmp, out);
  RGB24PackedToPlanarHelper_SSE2(out, tmp);
  RGB24PackedToPlanarHelper_SSE2(tmp, out);
  RGB24PackedToPlanarHelper_SSE2(out, tmp);
  RGB24PackedToPlanarHelper_SSE2(tmp, out);
}

// Convert 16 packed ARGB pixels to r[], g[], b[]
static WEBP_INLINE void RGB32PackedToPlanar_SSE2(const uint32_t* const argb,
                                                 __m128i* const rgb /*out[6]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i a0 = LOAD_16(argb + 0);
  __m128i a1 = LOAD_16(argb + 4);
  __m128i a2 = LOAD_16(argb + 8);
  __m128i a3 = LOAD_16(argb + 12);
  VP8L32bToPlanar_SSE2(&a0, &a1, &a2, &a3);
  rgb[0] = _mm_unpacklo_epi8(a1, zero);
  rgb[1] = _mm_unpackhi_epi8(a1, zero);
  rgb[2] = _mm_unpacklo_epi8(a2, zero);
  rgb[3] = _mm_unpackhi_epi8(a2, zero);
  rgb[4] = _mm_unpacklo_epi8(a3, zero);
  rgb[5] = _mm_unpackhi_epi8(a3, zero);
}

// This macro computes (RG * MULT_RG + GB * MULT_GB + ROUNDER) >> DESCALE_FIX
// It's a macro and not a function because we need to use immediate values with
// srai_epi32 (the DESCALE_FIX argument below).
#define TRANSFORM(RG_LO, RG_HI, GB_LO, GB_HI, MULT_RG, MULT_GB, \
                  ROUNDER, DESCALE_FIX, OUT) do {               \
  const __m128i V0_lo = _mm_madd_epi16(RG_LO, MULT_RG);         \
  const __m128i V0_hi = _mm_madd_epi16(RG_HI, MULT_RG);         \
  const __m128i V1_lo = _mm_madd_epi16(GB_LO, MULT_GB);         \
  const __m128i V1_hi = _mm_madd_epi16(GB_HI, MULT_GB);         \
  const __m128i V2_lo = _mm_add_epi32(V0_lo, V1_lo);            \
  const __m128i V2_hi = _mm_add_epi32(V0_hi, V1_hi);            \
  const __m128i V3_lo = _mm_add_epi32(V2_lo, ROUNDER);          \
  const __m128i V3_hi = _mm_add_epi32(V2_hi, ROUNDER);          \
  const __m128i V5_lo = _mm_srai_epi32(V3_lo, DESCALE_FIX);     \
  const __m128i V5_hi = _mm_srai_epi32(V3_hi, DESCALE_FIX);     \
  (OUT) = _mm_packs_epi32(V5_lo, V5_hi);                        \
} while (0)

#define MK_CST_16(A, B) _mm_set_epi16((B), (A), (B), (A), (B), (A), (B), (A))
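// MK_CST_16(A, B) builds {A,B,A,B,...} with A in the even 16b lanes, so that
// _mm_madd_epi16 against interleaved channel pairs computes A*ch0 + B*ch1.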
static WEBP_INLINE void ConvertRGBToY_SSE2(const __m128i* const R,
                                           const __m128i* const G,
                                           const __m128i* const B,
                                           __m128i* const Y) {
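  // The G coefficient (33059) doesn't fit in a signed 16b lane for madd_epi16,
  // so it is split between the RG and GB multipliers (16384 + (33059 - 16384)).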
  const __m128i kRG_y = MK_CST_16(16839, 33059 - 16384);
  const __m128i kGB_y = MK_CST_16(16384, 6420);
  const __m128i kHALF_Y = _mm_set1_epi32((16 << YUV_FIX) + YUV_HALF);

  const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G);
  const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G);
  const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B);
  const __m128i GB_hi = _mm_unpackhi_epi16(*G, *B);
  TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_y, kGB_y, kHALF_Y, YUV_FIX, *Y);
}

static WEBP_INLINE void ConvertRGBToUV_SSE2(const __m128i* const R,
                                            const __m128i* const G,
                                            const __m128i* const B,
                                            __m128i* const U,
                                            __m128i* const V) {
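  // Note: the R/G/B inputs here are expected to be 4x-scaled (e.g. the doubled
  // pair-sums produced by HorizontalAddPack_SSE2), hence the '<< 2' on the
  // rounder and the 'YUV_FIX + 2' descale.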
  const __m128i kRG_u = MK_CST_16(-9719, -19081);
  const __m128i kGB_u = MK_CST_16(0, 28800);
  const __m128i kRG_v = MK_CST_16(28800, 0);
  const __m128i kGB_v = MK_CST_16(-24116, -4684);
  const __m128i kHALF_UV = _mm_set1_epi32(((128 << YUV_FIX) + YUV_HALF) << 2);

  const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G);
  const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G);
  const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B);
  const __m128i GB_hi = _mm_unpackhi_epi16(*G, *B);
  TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_u, kGB_u,
            kHALF_UV, YUV_FIX + 2, *U);
  TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_v, kGB_v,
            kHALF_UV, YUV_FIX + 2, *V);
}

#undef MK_CST_16
#undef TRANSFORM

static void ConvertRGB24ToY_SSE2(const uint8_t* rgb, uint8_t* y, int width) {
  const int max_width = width & ~31;
  int i;
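  // Each iteration consumes 2 x 16 pixels, i.e. 96 bytes of packed RGB;
  // 'i' is advanced by 16 inside the inner loop.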
  for (i = 0; i < max_width; rgb += 3 * 16 * 2) {
    __m128i rgb_plane[6];
    int j;

    RGB24PackedToPlanar_SSE2(rgb, rgb_plane);

    for (j = 0; j < 2; ++j, i += 16) {
      const __m128i zero = _mm_setzero_si128();
      __m128i r, g, b, Y0, Y1;

      // Convert to 16-bit Y.
      r = _mm_unpacklo_epi8(rgb_plane[0 + j], zero);
      g = _mm_unpacklo_epi8(rgb_plane[2 + j], zero);
      b = _mm_unpacklo_epi8(rgb_plane[4 + j], zero);
      ConvertRGBToY_SSE2(&r, &g, &b, &Y0);

      // Convert to 16-bit Y.
      r = _mm_unpackhi_epi8(rgb_plane[0 + j], zero);
      g = _mm_unpackhi_epi8(rgb_plane[2 + j], zero);
      b = _mm_unpackhi_epi8(rgb_plane[4 + j], zero);
      ConvertRGBToY_SSE2(&r, &g, &b, &Y1);

      // Cast to 8-bit and store.
      STORE_16(_mm_packus_epi16(Y0, Y1), y + i);
    }
  }
  for (; i < width; ++i, rgb += 3) {   // left-over
    y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF);
  }
}

static void ConvertBGR24ToY_SSE2(const uint8_t* bgr, uint8_t* y, int width) {
  const int max_width = width & ~31;
  int i;
  for (i = 0; i < max_width; bgr += 3 * 16 * 2) {
    __m128i bgr_plane[6];
    int j;

    RGB24PackedToPlanar_SSE2(bgr, bgr_plane);

    for (j = 0; j < 2; ++j, i += 16) {
      const __m128i zero = _mm_setzero_si128();
      __m128i r, g, b, Y0, Y1;

      // Convert to 16-bit Y.
      b = _mm_unpacklo_epi8(bgr_plane[0 + j], zero);
      g = _mm_unpacklo_epi8(bgr_plane[2 + j], zero);
      r = _mm_unpacklo_epi8(bgr_plane[4 + j], zero);
      ConvertRGBToY_SSE2(&r, &g, &b, &Y0);

      // Convert to 16-bit Y.
      b = _mm_unpackhi_epi8(bgr_plane[0 + j], zero);
      g = _mm_unpackhi_epi8(bgr_plane[2 + j], zero);
      r = _mm_unpackhi_epi8(bgr_plane[4 + j], zero);
      ConvertRGBToY_SSE2(&r, &g, &b, &Y1);

      // Cast to 8-bit and store.
      STORE_16(_mm_packus_epi16(Y0, Y1), y + i);
    }
  }
  for (; i < width; ++i, bgr += 3) {  // left-over
    y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF);
  }
}

static void ConvertARGBToY_SSE2(const uint32_t* argb, uint8_t* y, int width) {
  const int max_width = width & ~15;
  int i;
  for (i = 0; i < max_width; i += 16) {
    __m128i Y0, Y1, rgb[6];
    RGB32PackedToPlanar_SSE2(&argb[i], rgb);
    ConvertRGBToY_SSE2(&rgb[0], &rgb[2], &rgb[4], &Y0);
    ConvertRGBToY_SSE2(&rgb[1], &rgb[3], &rgb[5], &Y1);
    STORE_16(_mm_packus_epi16(Y0, Y1), y + i);
  }
  for (; i < width; ++i) {   // left-over
    const uint32_t p = argb[i];
    y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >>  0) & 0xff,
                     YUV_HALF);
  }
}

// Horizontal add (doubled) of two 16b values, result is 16b.
// in: A | B | C | D | ... -> out: 2*(A+B) | 2*(C+D) | ...
static void HorizontalAddPack_SSE2(const __m128i* const A,
                                   const __m128i* const B,
                                   __m128i* const out) {
  const __m128i k2 = _mm_set1_epi16(2);
  const __m128i C = _mm_madd_epi16(*A, k2);
  const __m128i D = _mm_madd_epi16(*B, k2);
  *out = _mm_packs_epi32(C, D);
}

static void ConvertARGBToUV_SSE2(const uint32_t* argb,
                                 uint8_t* u, uint8_t* v,
                                 int src_width, int do_store) {
  const int max_width = src_width & ~31;
  int i;
  for (i = 0; i < max_width; i += 32, u += 16, v += 16) {
    __m128i rgb[6], U0, V0, U1, V1;
    RGB32PackedToPlanar_SSE2(&argb[i], rgb);
    HorizontalAddPack_SSE2(&rgb[0], &rgb[1], &rgb[0]);
    HorizontalAddPack_SSE2(&rgb[2], &rgb[3], &rgb[2]);
    HorizontalAddPack_SSE2(&rgb[4], &rgb[5], &rgb[4]);
    ConvertRGBToUV_SSE2(&rgb[0], &rgb[2], &rgb[4], &U0, &V0);

    RGB32PackedToPlanar_SSE2(&argb[i + 16], rgb);
    HorizontalAddPack_SSE2(&rgb[0], &rgb[1], &rgb[0]);
    HorizontalAddPack_SSE2(&rgb[2], &rgb[3], &rgb[2]);
    HorizontalAddPack_SSE2(&rgb[4], &rgb[5], &rgb[4]);
    ConvertRGBToUV_SSE2(&rgb[0], &rgb[2], &rgb[4], &U1, &V1);

    U0 = _mm_packus_epi16(U0, U1);
    V0 = _mm_packus_epi16(V0, V1);
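    // When not storing directly, average the new chroma with the values
    // already present in u[] / v[].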
    if (!do_store) {
      const __m128i prev_u = LOAD_16(u);
      const __m128i prev_v = LOAD_16(v);
      U0 = _mm_avg_epu8(U0, prev_u);
      V0 = _mm_avg_epu8(V0, prev_v);
    }
    STORE_16(U0, u);
    STORE_16(V0, v);
  }
  if (i < src_width) {  // left-over
    WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store);
  }
}

// Convert 8 packed rgbx pixels (16b per channel) to r[], g[], b[]
static WEBP_INLINE void RGBA32PackedToPlanar_16b_SSE2(
    const uint16_t* const rgbx,
    __m128i* const r, __m128i* const g, __m128i* const b) {
  const __m128i in0 = LOAD_16(rgbx +  0);  // r0 | g0 | b0 |x| r1 | g1 | b1 |x
  const __m128i in1 = LOAD_16(rgbx +  8);  // r2 | g2 | b2 |x| r3 | g3 | b3 |x
  const __m128i in2 = LOAD_16(rgbx + 16);  // r4 | ...
  const __m128i in3 = LOAD_16(rgbx + 24);  // r6 | ...
  // column-wise transpose
  const __m128i A0 = _mm_unpacklo_epi16(in0, in1);
  const __m128i A1 = _mm_unpackhi_epi16(in0, in1);
  const __m128i A2 = _mm_unpacklo_epi16(in2, in3);
  const __m128i A3 = _mm_unpackhi_epi16(in2, in3);
  const __m128i B0 = _mm_unpacklo_epi16(A0, A1);  // r0 r1 r2 r3 | g0 g1 ..
  const __m128i B1 = _mm_unpackhi_epi16(A0, A1);  // b0 b1 b2 b3 | x x x x
  const __m128i B2 = _mm_unpacklo_epi16(A2, A3);  // r4 r5 r6 r7 | g4 g5 ..
  const __m128i B3 = _mm_unpackhi_epi16(A2, A3);  // b4 b5 b6 b7 | x x x x
  *r = _mm_unpacklo_epi64(B0, B2);
  *g = _mm_unpackhi_epi64(B0, B2);
  *b = _mm_unpacklo_epi64(B1, B3);
}

static void ConvertRGBA32ToUV_SSE2(const uint16_t* rgb,
                                   uint8_t* u, uint8_t* v, int width) {
  const int max_width = width & ~15;
  const uint16_t* const last_rgb = rgb + 4 * max_width;
  while (rgb < last_rgb) {
    __m128i r, g, b, U0, V0, U1, V1;
    RGBA32PackedToPlanar_16b_SSE2(rgb +  0, &r, &g, &b);
    ConvertRGBToUV_SSE2(&r, &g, &b, &U0, &V0);
    RGBA32PackedToPlanar_16b_SSE2(rgb + 32, &r, &g, &b);
    ConvertRGBToUV_SSE2(&r, &g, &b, &U1, &V1);
    STORE_16(_mm_packus_epi16(U0, U1), u);
    STORE_16(_mm_packus_epi16(V0, V1), v);
    u += 16;
    v += 16;
    rgb += 2 * 32;
  }
  if (max_width < width) {  // left-over
    WebPConvertRGBA32ToUV_C(rgb, u, v, width - max_width);
  }
}

//------------------------------------------------------------------------------

extern void WebPInitConvertARGBToYUVSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVSSE2(void) {
  WebPConvertARGBToY = ConvertARGBToY_SSE2;
  WebPConvertARGBToUV = ConvertARGBToUV_SSE2;

  WebPConvertRGB24ToY = ConvertRGB24ToY_SSE2;
  WebPConvertBGR24ToY = ConvertBGR24ToY_SSE2;

  WebPConvertRGBA32ToUV = ConvertRGBA32ToUV_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPInitSamplersSSE2)
WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVSSE2)

#endif  // WEBP_USE_SSE2