// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for processing transparent channel.
//
// Author: Skal ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>

//------------------------------------------------------------------------------

static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                              int alpha_stride, int width, int height,
                              uint8_t* WEBP_RESTRICT dst, int dst_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const __m128i rgb_mask = _mm_set1_epi32((int)0xffffff00);  // to preserve RGB
  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0, ~0);
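  // Note: only the low 64 bits of 'all_0xff' are all-ones, matching the eight
  // alpha bytes accumulated per iteration below; the upper halves of both
  // registers stay zero, so they always compare equal.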
  __m128i all_alphas = all_0xff;

  // We must be able to access 3 extra bytes after the last written byte
  // 'dst[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
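  // Using '(width - 1)' rather than 'width' keeps the last pixel out of the
  // SIMD loop, so the 16-byte loads/stores below never reach past the buffer.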
  const int limit = (width - 1) & ~7;

  for (j = 0; j < height; ++j) {
    __m128i* out = (__m128i*)dst;
    for (i = 0; i < limit; i += 8) {
      // load 8 alpha bytes
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      // load 8 dst pixels (32 bytes)
      const __m128i b0_lo = _mm_loadu_si128(out + 0);
      const __m128i b0_hi = _mm_loadu_si128(out + 1);
      // mask dst alpha values
      const __m128i b1_lo = _mm_and_si128(b0_lo, rgb_mask);
      const __m128i b1_hi = _mm_and_si128(b0_hi, rgb_mask);
      // combine
      const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo);
      const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi);
      // store
      _mm_storeu_si128(out + 0, b2_lo);
      _mm_storeu_si128(out + 1, b2_hi);
      // accumulate eight alpha 'and' in parallel
      all_alphas = _mm_and_si128(all_alphas, a0);
      out += 2;
    }
    for (; i < width; ++i) {
      const uint32_t alpha_value = alpha[i];
      dst[4 * i] = alpha_value;
      alpha_and &= alpha_value;
    }
    alpha += alpha_stride;
    dst += dst_stride;
  }
  // Combine the eight alpha 'and' into an 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
  return (alpha_and != 0xff);
}

static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                                      int alpha_stride, int width, int height,
                                      uint32_t* WEBP_RESTRICT dst,
                                      int dst_stride) {
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const int limit = width & ~15;
  for (j = 0; j < height; ++j) {
    for (i = 0; i < limit; i += 16) {   // process 16 alpha bytes
      const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
      const __m128i a1 = _mm_unpacklo_epi8(zero, a0);  // note the 'zero' first!
      const __m128i b1 = _mm_unpackhi_epi8(zero, a0);
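      // Interleaving 'zero' as the low byte makes each 16-bit lane hold
      // 'alpha << 8', i.e. alpha lands in the green slot of the 32-bit pixel,
      // matching the scalar tail below.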
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i b2_lo = _mm_unpacklo_epi16(b1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      const __m128i b2_hi = _mm_unpackhi_epi16(b1, zero);
      _mm_storeu_si128((__m128i*)&dst[i +  0], a2_lo);
      _mm_storeu_si128((__m128i*)&dst[i +  4], a2_hi);
      _mm_storeu_si128((__m128i*)&dst[i +  8], b2_lo);
      _mm_storeu_si128((__m128i*)&dst[i + 12], b2_hi);
    }
    for (; i < width; ++i) dst[i] = alpha[i] << 8;
    alpha += alpha_stride;
    dst += dst_stride;
  }
}

static int ExtractAlpha_SSE2(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
                             int width, int height,
                             uint8_t* WEBP_RESTRICT alpha, int alpha_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i a_mask = _mm_set1_epi32(0xff);  // to preserve alpha
  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0, ~0);
  __m128i all_alphas = all_0xff;

  // We must be able to access 3 extra bytes after the last loaded byte
  // 'argb[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
  const int limit = (width - 1) & ~7;

  for (j = 0; j < height; ++j) {
    const __m128i* src = (const __m128i*)argb;
    for (i = 0; i < limit; i += 8) {
      // load 32 argb bytes
      const __m128i a0 = _mm_loadu_si128(src + 0);
      const __m128i a1 = _mm_loadu_si128(src + 1);
      const __m128i b0 = _mm_and_si128(a0, a_mask);
      const __m128i b1 = _mm_and_si128(a1, a_mask);
      const __m128i c0 = _mm_packs_epi32(b0, b1);
      const __m128i d0 = _mm_packus_epi16(c0, c0);
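      // The saturating packs are lossless here since every masked value fits
      // in a byte; d0 now holds the eight alpha bytes in its low half.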
      // store
      _mm_storel_epi64((__m128i*)&alpha[i], d0);
      // accumulate eight alpha 'and' in parallel
      all_alphas = _mm_and_si128(all_alphas, d0);
      src += 2;
    }
    for (; i < width; ++i) {
      const uint32_t alpha_value = argb[4 * i];
      alpha[i] = alpha_value;
      alpha_and &= alpha_value;
    }
    argb += argb_stride;
    alpha += alpha_stride;
  }
  // Combine the eight alpha 'and' into an 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
  return (alpha_and == 0xff);
}

static void ExtractGreen_SSE2(const uint32_t* WEBP_RESTRICT argb,
                              uint8_t* WEBP_RESTRICT alpha, int size) {
  int i;
  const __m128i mask = _mm_set1_epi32(0xff);
  const __m128i* src = (const __m128i*)argb;

  for (i = 0; i + 16 <= size; i += 16, src += 4) {
    const __m128i a0 = _mm_loadu_si128(src + 0);
    const __m128i a1 = _mm_loadu_si128(src + 1);
    const __m128i a2 = _mm_loadu_si128(src + 2);
    const __m128i a3 = _mm_loadu_si128(src + 3);
    const __m128i b0 = _mm_srli_epi32(a0, 8);
    const __m128i b1 = _mm_srli_epi32(a1, 8);
    const __m128i b2 = _mm_srli_epi32(a2, 8);
    const __m128i b3 = _mm_srli_epi32(a3, 8);
    const __m128i c0 = _mm_and_si128(b0, mask);
    const __m128i c1 = _mm_and_si128(b1, mask);
    const __m128i c2 = _mm_and_si128(b2, mask);
    const __m128i c3 = _mm_and_si128(b3, mask);
    const __m128i d0 = _mm_packs_epi32(c0, c1);
    const __m128i d1 = _mm_packs_epi32(c2, c3);
    const __m128i e = _mm_packus_epi16(d0, d1);
    // store
    _mm_storeu_si128((__m128i*)&alpha[i], e);
  }
  if (i + 8 <= size) {
    const __m128i a0 = _mm_loadu_si128(src + 0);
    const __m128i a1 = _mm_loadu_si128(src + 1);
    const __m128i b0 = _mm_srli_epi32(a0, 8);
    const __m128i b1 = _mm_srli_epi32(a1, 8);
    const __m128i c0 = _mm_and_si128(b0, mask);
    const __m128i c1 = _mm_and_si128(b1, mask);
    const __m128i d = _mm_packs_epi32(c0, c1);
    const __m128i e = _mm_packus_epi16(d, d);
    _mm_storel_epi64((__m128i*)&alpha[i], e);
    i += 8;
  }
  for (; i < size; ++i) alpha[i] = argb[i] >> 8;
}

//------------------------------------------------------------------------------
// Non-dither premultiplied modes

#define MULTIPLIER(a)   ((a) * 0x8081)
#define PREMULTIPLY(x, m) (((x) * (m)) >> 23)

// We can't use a 'const int' for the SHUFFLE value, because it has to be an
// immediate in the _mm_shufflexx_epi16() instruction. We really need a macro.
// We use: v / 255 = (v * 0x8081) >> 23, where v = alpha * {r,g,b} is a 16bit
// value.
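// Worked check of the reciprocal trick: for the largest input
// v = 255 * 255 = 65025, (65025 * 0x8081) >> 23 = 255, and for
// v = 509 (= 2 * 255 - 1), (509 * 0x8081) >> 23 = 1, both matching
// floor(v / 255).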
#define APPLY_ALPHA(RGBX, SHUFFLE) do {                              \
  const __m128i argb0 = _mm_loadu_si128((const __m128i*)&(RGBX));    \
  const __m128i argb1_lo = _mm_unpacklo_epi8(argb0, zero);           \
  const __m128i argb1_hi = _mm_unpackhi_epi8(argb0, zero);           \
  const __m128i alpha0_lo = _mm_or_si128(argb1_lo, kMask);           \
  const __m128i alpha0_hi = _mm_or_si128(argb1_hi, kMask);           \
  const __m128i alpha1_lo = _mm_shufflelo_epi16(alpha0_lo, SHUFFLE); \
  const __m128i alpha1_hi = _mm_shufflelo_epi16(alpha0_hi, SHUFFLE); \
  const __m128i alpha2_lo = _mm_shufflehi_epi16(alpha1_lo, SHUFFLE); \
  const __m128i alpha2_hi = _mm_shufflehi_epi16(alpha1_hi, SHUFFLE); \
  /* alpha2 = [ff a0 a0 a0][ff a1 a1 a1] */                          \
  const __m128i A0_lo = _mm_mullo_epi16(alpha2_lo, argb1_lo);        \
  const __m128i A0_hi = _mm_mullo_epi16(alpha2_hi, argb1_hi);        \
  const __m128i A1_lo = _mm_mulhi_epu16(A0_lo, kMult);               \
  const __m128i A1_hi = _mm_mulhi_epu16(A0_hi, kMult);               \
  const __m128i A2_lo = _mm_srli_epi16(A1_lo, 7);                    \
  const __m128i A2_hi = _mm_srli_epi16(A1_hi, 7);                    \
  const __m128i A3 = _mm_packus_epi16(A2_lo, A2_hi);                 \
  _mm_storeu_si128((__m128i*)&(RGBX), A3);                           \
} while (0)

static void ApplyAlphaMultiply_SSE2(uint8_t* rgba, int alpha_first,
                                    int w, int h, int stride) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i kMult = _mm_set1_epi16((short)0x8081);
  const __m128i kMask = _mm_set_epi16(0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0);
  const int kSpan = 4;
  while (h-- > 0) {
    uint32_t* const rgbx = (uint32_t*)rgba;
    int i;
    if (!alpha_first) {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(2, 3, 3, 3));
      }
    } else {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(0, 0, 0, 1));
      }
    }
    // Finish with left-overs.
    for (; i < w; ++i) {
      uint8_t* const rgb = rgba + (alpha_first ? 1 : 0);
      const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3);
      const uint32_t a = alpha[4 * i];
      if (a != 0xff) {
        const uint32_t mult = MULTIPLIER(a);
        rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult);
        rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult);
        rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult);
      }
    }
    rgba += stride;
  }
}
#undef MULTIPLIER
#undef PREMULTIPLY

//------------------------------------------------------------------------------
// Alpha detection

static int HasAlpha8b_SSE2(const uint8_t* src, int length) {
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  int i = 0;
  for (; i + 16 <= length; i += 16) {
    const __m128i v = _mm_loadu_si128((const __m128i*)(src + i));
    const __m128i bits = _mm_cmpeq_epi8(v, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i < length; ++i) if (src[i] != 0xff) return 1;
  return 0;
}
269*b2055c35SXin Li 
HasAlpha32b_SSE2(const uint8_t * src,int length)270*b2055c35SXin Li static int HasAlpha32b_SSE2(const uint8_t* src, int length) {
271*b2055c35SXin Li   const __m128i alpha_mask = _mm_set1_epi32(0xff);
272*b2055c35SXin Li   const __m128i all_0xff = _mm_set1_epi8((char)0xff);
273*b2055c35SXin Li   int i = 0;
274*b2055c35SXin Li   // We don't know if we can access the last 3 bytes after the last alpha
275*b2055c35SXin Li   // value 'src[4 * length - 4]' (because we don't know if alpha is the first
276*b2055c35SXin Li   // or the last byte of the quadruplet). Hence the '-3' protection below.
277*b2055c35SXin Li   length = length * 4 - 3;   // size in bytes
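  // From here on 'i' indexes bytes, and the scalar tail steps one pixel
  // (4 bytes) at a time; its '<=' bound compensates for the '-3' above so the
  // last alpha byte is still tested.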
  for (; i + 64 <= length; i += 64) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i +  0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i a2 = _mm_loadu_si128((const __m128i*)(src + i + 32));
    const __m128i a3 = _mm_loadu_si128((const __m128i*)(src + i + 48));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i b2 = _mm_and_si128(a2, alpha_mask);
    const __m128i b3 = _mm_and_si128(a3, alpha_mask);
    const __m128i c0 = _mm_packs_epi32(b0, b1);
    const __m128i c1 = _mm_packs_epi32(b2, b3);
    const __m128i d  = _mm_packus_epi16(c0, c1);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i + 32 <= length; i += 32) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i +  0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i c  = _mm_packs_epi32(b0, b1);
    const __m128i d  = _mm_packus_epi16(c, c);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i <= length; i += 4) if (src[i] != 0xff) return 1;
  return 0;
}
308*b2055c35SXin Li 
AlphaReplace_SSE2(uint32_t * src,int length,uint32_t color)309*b2055c35SXin Li static void AlphaReplace_SSE2(uint32_t* src, int length, uint32_t color) {
310*b2055c35SXin Li   const __m128i m_color = _mm_set1_epi32((int)color);
311*b2055c35SXin Li   const __m128i zero = _mm_setzero_si128();
312*b2055c35SXin Li   int i = 0;
313*b2055c35SXin Li   for (; i + 8 <= length; i += 8) {
314*b2055c35SXin Li     const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0));
315*b2055c35SXin Li     const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 4));
316*b2055c35SXin Li     const __m128i b0 = _mm_srai_epi32(a0, 24);
317*b2055c35SXin Li     const __m128i b1 = _mm_srai_epi32(a1, 24);
318*b2055c35SXin Li     const __m128i c0 = _mm_cmpeq_epi32(b0, zero);
319*b2055c35SXin Li     const __m128i c1 = _mm_cmpeq_epi32(b1, zero);
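    // c0/c1 are all-ones exactly for pixels whose alpha byte is zero; the
    // and/andnot pair below is a branchless select between 'color' and the
    // original pixel.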
    const __m128i d0 = _mm_and_si128(c0, m_color);
    const __m128i d1 = _mm_and_si128(c1, m_color);
    const __m128i e0 = _mm_andnot_si128(c0, a0);
    const __m128i e1 = _mm_andnot_si128(c1, a1);
    _mm_storeu_si128((__m128i*)(src + i + 0), _mm_or_si128(d0, e0));
    _mm_storeu_si128((__m128i*)(src + i + 4), _mm_or_si128(d1, e1));
  }
  for (; i < length; ++i) if ((src[i] >> 24) == 0) src[i] = color;
}

// -----------------------------------------------------------------------------
// Apply alpha value to rows

static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const int kSpan = 2;
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
    const __m128i kMask = _mm_set_epi16(0, 0xff, 0, 0, 0, 0xff, 0, 0);
    for (x = 0; x + kSpan <= width; x += kSpan) {
      // To compute 'result = (int)(a * x / 255. + .5)', we use:
      //   tmp = a * v + 128, result = (tmp * 0x0101u) >> 16
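      // (this is the exact rounded division by 255 for any 16-bit product;
      // e.g. a = 128, v = 100: tmp = 12928 and (12928 * 0x0101) >> 16 = 50,
      // matching (int)(12800 / 255. + .5)).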
      const __m128i A0 = _mm_loadl_epi64((const __m128i*)&ptr[x]);
      const __m128i A1 = _mm_unpacklo_epi8(A0, zero);
      const __m128i A2 = _mm_or_si128(A1, kMask);
      const __m128i A3 = _mm_shufflelo_epi16(A2, _MM_SHUFFLE(2, 3, 3, 3));
      const __m128i A4 = _mm_shufflehi_epi16(A3, _MM_SHUFFLE(2, 3, 3, 3));
      // here, A4 = [ff a0 a0 a0][ff a1 a1 a1]
      const __m128i A5 = _mm_mullo_epi16(A4, A1);
      const __m128i A6 = _mm_add_epi16(A5, k128);
      const __m128i A7 = _mm_mulhi_epu16(A6, kMult);
      const __m128i A10 = _mm_packus_epi16(A7, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], A10);
    }
  }
  width -= x;
  if (width > 0) WebPMultARGBRow_C(ptr + x, width, inverse);
}

static void MultRow_SSE2(uint8_t* WEBP_RESTRICT const ptr,
                         const uint8_t* WEBP_RESTRICT const alpha,
                         int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
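    // Same rounded division by 255 as in MultARGBRow_SSE2() above:
    //   result = ((v * a + 128) * 0x0101) >> 16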
    for (x = 0; x + 8 <= width; x += 8) {
      const __m128i v0 = _mm_loadl_epi64((__m128i*)&ptr[x]);
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
      const __m128i v1 = _mm_unpacklo_epi8(v0, zero);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i v2 = _mm_mullo_epi16(v1, a1);
      const __m128i v3 = _mm_add_epi16(v2, k128);
      const __m128i v4 = _mm_mulhi_epu16(v3, kMult);
      const __m128i v5 = _mm_packus_epi16(v4, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], v5);
    }
  }
  width -= x;
  if (width > 0) WebPMultRow_C(ptr + x, alpha + x, width, inverse);
}

//------------------------------------------------------------------------------
// Entry point

extern void WebPInitAlphaProcessingSSE2(void);
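// Installs the SSE2 variants into the function pointers declared in dsp.h.
// This is expected to be called from the generic WebPInitAlphaProcessing()
// initializer once SSE2 support has been detected at runtime.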

WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE2(void) {
  WebPMultARGBRow = MultARGBRow_SSE2;
  WebPMultRow = MultRow_SSE2;
  WebPApplyAlphaMultiply = ApplyAlphaMultiply_SSE2;
  WebPDispatchAlpha = DispatchAlpha_SSE2;
  WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2;
  WebPExtractAlpha = ExtractAlpha_SSE2;
  WebPExtractGreen = ExtractGreen_SSE2;

  WebPHasAlpha8b = HasAlpha8b_SSE2;
  WebPHasAlpha32b = HasAlpha32b_SSE2;
  WebPAlphaReplace = AlphaReplace_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingSSE2)

#endif  // WEBP_USE_SSE2