// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE4 version of some encoding functions.
//
// Author: Skal ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE41)
#include <smmintrin.h>
#include <stdlib.h>  // for abs()

#include "src/dsp/common_sse2.h"
#include "src/enc/vp8i_enc.h"

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms.
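// For each 4x4 block in [start_block, end_block), the forward transform of
// the (ref - pred) residual is computed and each coefficient is bucketed as
//   bin = min(abs(coeff) >> 3, MAX_COEFF_THRESH)
// before the distribution is handed to VP8SetHistogramData().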
static void CollectHistogram_SSE41(const uint8_t* ref, const uint8_t* pred,
                                   int start_block, int end_block,
                                   VP8Histogram* const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // v = abs(out) >> 3
      const __m128i abs0 = _mm_abs_epi16(out0);
      const __m128i abs1 = _mm_abs_epi16(out1);
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Accumulate the binned coefficients into the distribution.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}

//------------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the weighted sum of the absolute value of transformed coefficients.
// w[] contains a row-major 4 by 4 symmetric matrix.
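// In other words, the value computed here is
//   sum_k w[k] * (|H(inA)[k]| - |H(inB)[k]|)
// where H() is the un-normalized 4x4 Walsh-Hadamard transform. Both
// transforms are carried in parallel, block A in the low 64 bits and
// block B in the high 64 bits of each register.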
static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB,
                            const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;

  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);
    // In SSE4.1, with gcc 4.8 at least (maybe other versions),
    // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last row
    // of inA and inB, _mm_loadl_epi64 is still used so as not to read out
    // of bounds.
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    tmp_0 = _mm_cvtepu8_epi16(inAB_0);
    tmp_1 = _mm_cvtepu8_epi16(inAB_1);
    tmp_2 = _mm_cvtepu8_epi16(inAB_2);
    tmp_3 = _mm_cvtepu8_epi16(inAB_3);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }

  // Vertical pass first, to avoid a transpose before the first pass and a
  // transpose back afterwards (the vertical and horizontal passes commute
  // because w/kWeightY is symmetric).
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }

  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b2);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}

static int Disto4x4_SSE41(const uint8_t* const a, const uint8_t* const b,
                          const uint16_t* const w) {
  const int diff_sum = TTransform_SSE41(a, b, w);
  return abs(diff_sum) >> 5;
}

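// Accumulates Disto4x4_SSE41 over the sixteen 4x4 sub-blocks of a 16x16
// macroblock.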
static int Disto16x16_SSE41(const uint8_t* const a, const uint8_t* const b,
                            const uint16_t* const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4_SSE41(a + x + y, b + x + y, w);
    }
  }
  return D;
}

//------------------------------------------------------------------------------
// Quantization
//

// Generates a pshufb constant for shuffling 16b words.
#define PSHUFB_CST(A,B,C,D,E,F,G,H) \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)

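// In PSHUFB_CST above, each of A..H selects the 16b source word for that
// output position; a -1 index yields control bytes with their high bit set,
// which _mm_shuffle_epi8 turns into zeroed output bytes.

// DoQuantizeBlock quantizes one 4x4 block. Per coefficient:
//   level = sign(in) * min(((abs(in) [+ sharpen]) * iQ + B) >> QFIX, MAX_LEVEL)
// The de-quantized value (level * Q) is written back to in[], the levels are
// stored in zigzag order in out[], and the return value is 1 iff at least one
// level is non-zero.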
static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                                             const uint16_t* const sharpen,
                                             const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // put sign back
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // Zigzag the output before storing it. The re-ordering is:
  //    0 1 2 3 4 5 6 7 | 8  9 10 11 12 13 14 15
  // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15
  // There are only two misplaced entries ([8] and [7]) that cross the
  // register boundary.
  // We use pshufb instead of pshuflo/pshufhi.
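  // Equivalently, out[i] = quantized[kZigzag[i]] with the VP8 scan order
  // kZigzag[] = { 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 }.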
  {
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);  // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);  // extract #8
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}

#undef PSHUFB_CST

static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                               const VP8Matrix* const mtx) {
  return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen_[0], mtx);
}

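// The WHT (DC) block is quantized without the sharpen[] bias.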
static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16],
                                  const VP8Matrix* const mtx) {
  return DoQuantizeBlock_SSE41(in, out, NULL, mtx);
}

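// Returns a 2-bit map: bit n is set if the n-th block has at least one
// non-zero quantized coefficient.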
static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32],
                                 const VP8Matrix* const mtx) {
  int nz;
  const uint16_t* const sharpen = &mtx->sharpen_[0];
  nz  = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
  nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
  return nz;
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8EncDspInitSSE41(void);
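// Installs the SSE4.1 variants into the global function pointers; presumably
// invoked by the encoder's dsp initialization once SSE4.1 support has been
// detected at runtime.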
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  VP8CollectHistogram = CollectHistogram_SSE41;
  VP8EncQuantizeBlock = QuantizeBlock_SSE41;
  VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;
  VP8TDisto4x4 = Disto4x4_SSE41;
  VP8TDisto16x16 = Disto16x16_SSE41;
}

#else  // !WEBP_USE_SSE41

WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)

#endif  // WEBP_USE_SSE41