/*
 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./vpx_dsp_rtcd.h"

/* clang-format off */
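// Each bilinear filter entry below occupies 32 bytes: the tap pair
// (16 - 2 * offset, 2 * offset) repeated 16 times, so one 256-bit load at
// bilinear_filters_avx2 + (offset << 5) broadcasts the filter across a whole
// register. FILTER_SRC below applies it as (a * tap0 + b * tap1 + 8) >> 4.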
DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = {
  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
  16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0,
  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
  14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2,
  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
  12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4,
  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
  10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
  6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10,
  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
  4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12,
  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
  2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14,
};

DECLARE_ALIGNED(32, static const int8_t, adjacent_sub_avx2[32]) = {
  1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
  1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1
};
/* clang-format on */

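// Accumulate, for 32 pairs of src/ref bytes, the running 16-bit sum of
// (src - ref) in *sum and the running 32-bit sum of squared differences in
// *sse.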
static INLINE void variance_kernel_avx2(const __m256i src, const __m256i ref,
                                        __m256i *const sse,
                                        __m256i *const sum) {
  const __m256i adj_sub = _mm256_load_si256((__m256i const *)adjacent_sub_avx2);

  // unpack into pairs of source and reference values
  const __m256i src_ref0 = _mm256_unpacklo_epi8(src, ref);
  const __m256i src_ref1 = _mm256_unpackhi_epi8(src, ref);

  // subtract adjacent elements using src*1 + ref*-1
  const __m256i diff0 = _mm256_maddubs_epi16(src_ref0, adj_sub);
  const __m256i diff1 = _mm256_maddubs_epi16(src_ref1, adj_sub);
  const __m256i madd0 = _mm256_madd_epi16(diff0, diff0);
  const __m256i madd1 = _mm256_madd_epi16(diff1, diff1);

  // add to the running totals
  *sum = _mm256_add_epi16(*sum, _mm256_add_epi16(diff0, diff1));
  *sse = _mm256_add_epi32(*sse, _mm256_add_epi32(madd0, madd1));
}

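// Fold the eight 32-bit SSE partials in vsse and the four 32-bit sum partials
// in vsum down to the scalar outputs *sse and *sum.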
static INLINE void variance_final_from_32bit_sum_avx2(__m256i vsse,
                                                      __m128i vsum,
                                                      unsigned int *const sse,
                                                      int *const sum) {
  // extract the low lane and add it to the high lane
  const __m128i sse_reg_128 = _mm_add_epi32(_mm256_castsi256_si128(vsse),
                                            _mm256_extractf128_si256(vsse, 1));

  // unpack sse and sum registers and add
  const __m128i sse_sum_lo = _mm_unpacklo_epi32(sse_reg_128, vsum);
  const __m128i sse_sum_hi = _mm_unpackhi_epi32(sse_reg_128, vsum);
  const __m128i sse_sum = _mm_add_epi32(sse_sum_lo, sse_sum_hi);

  // perform the final summation and extract the results
  const __m128i res = _mm_add_epi32(sse_sum, _mm_srli_si128(sse_sum, 8));
  *((int *)sse) = _mm_cvtsi128_si32(res);
  *((int *)sum) = _mm_extract_epi32(res, 1);
}

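// Reduce the sixteen 16-bit sum partials in vsum to four 32-bit values, then
// defer to the 32-bit reduction above.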
static INLINE void variance_final_from_16bit_sum_avx2(__m256i vsse,
                                                      __m256i vsum,
                                                      unsigned int *const sse,
                                                      int *const sum) {
  // extract the low lane and add it to the high lane
  const __m128i sum_reg_128 = _mm_add_epi16(_mm256_castsi256_si128(vsum),
                                            _mm256_extractf128_si256(vsum, 1));
  const __m128i sum_reg_64 =
      _mm_add_epi16(sum_reg_128, _mm_srli_si128(sum_reg_128, 8));
  const __m128i sum_int32 = _mm_cvtepi16_epi32(sum_reg_64);

  variance_final_from_32bit_sum_avx2(vsse, sum_int32, sse, sum);
}

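// Sign-extend the sixteen 16-bit lanes of sum to 32 bits and add the two
// halves, yielding eight 32-bit partial sums.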
static INLINE __m256i sum_to_32bit_avx2(const __m256i sum) {
  const __m256i sum_lo = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(sum));
  const __m256i sum_hi =
      _mm256_cvtepi16_epi32(_mm256_extractf128_si256(sum, 1));
  return _mm256_add_epi32(sum_lo, sum_hi);
}

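// Process two rows of 8 pixels per call: widen src and ref to 16 bits and
// accumulate the row sums and squared differences.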
static INLINE void variance8_kernel_avx2(
    const uint8_t *const src, const int src_stride, const uint8_t *const ref,
    const int ref_stride, __m256i *const sse, __m256i *const sum) {
  __m128i src0, src1, ref0, ref1;
  __m256i ss, rr, diff;

  // 0 0 0.... 0 s07 s06 s05 s04 s03 s02 s01 s00
  src0 = _mm_loadl_epi64((const __m128i *)(src + 0 * src_stride));

  // 0 0 0.... 0 s17 s16 s15 s14 s13 s12 s11 s10
  src1 = _mm_loadl_epi64((const __m128i *)(src + 1 * src_stride));

  // s17 s16...s11 s10 s07 s06...s01 s00 (8 bit)
  src0 = _mm_unpacklo_epi64(src0, src1);

  // s17 s16...s11 s10 s07 s06...s01 s00 (16 bit)
  ss = _mm256_cvtepu8_epi16(src0);

  // 0 0 0.... 0 r07 r06 r05 r04 r03 r02 r01 r00
  ref0 = _mm_loadl_epi64((const __m128i *)(ref + 0 * ref_stride));

  // 0 0 0.... 0 r17 r16 r15 r14 r13 r12 r11 r10
  ref1 = _mm_loadl_epi64((const __m128i *)(ref + 1 * ref_stride));

  // r17 r16...r11 r10 r07 r06...r01 r00 (8 bit)
  ref0 = _mm_unpacklo_epi64(ref0, ref1);

  // r17 r16...r11 r10 r07 r06...r01 r00 (16 bit)
  rr = _mm256_cvtepu8_epi16(ref0);

  diff = _mm256_sub_epi16(ss, rr);
  *sse = _mm256_add_epi32(*sse, _mm256_madd_epi16(diff, diff));
  *sum = _mm256_add_epi16(*sum, diff);
}

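// Process two rows of 16 pixels per call, one row in each 128-bit lane of the
// 256-bit registers handed to variance_kernel_avx2().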
static INLINE void variance16_kernel_avx2(
    const uint8_t *const src, const int src_stride, const uint8_t *const ref,
    const int ref_stride, __m256i *const sse, __m256i *const sum) {
  const __m128i s0 = _mm_loadu_si128((__m128i const *)(src + 0 * src_stride));
  const __m128i s1 = _mm_loadu_si128((__m128i const *)(src + 1 * src_stride));
  const __m128i r0 = _mm_loadu_si128((__m128i const *)(ref + 0 * ref_stride));
  const __m128i r1 = _mm_loadu_si128((__m128i const *)(ref + 1 * ref_stride));
  const __m256i s = _mm256_inserti128_si256(_mm256_castsi128_si256(s0), s1, 1);
  const __m256i r = _mm256_inserti128_si256(_mm256_castsi128_si256(r0), r1, 1);
  variance_kernel_avx2(s, r, sse, sum);
}

static INLINE void variance32_kernel_avx2(const uint8_t *const src,
                                          const uint8_t *const ref,
                                          __m256i *const sse,
                                          __m256i *const sum) {
  const __m256i s = _mm256_loadu_si256((__m256i const *)(src));
  const __m256i r = _mm256_loadu_si256((__m256i const *)(ref));
  variance_kernel_avx2(s, r, sse, sum);
}

static INLINE void variance8_avx2(const uint8_t *src, const int src_stride,
                                  const uint8_t *ref, const int ref_stride,
                                  const int h, __m256i *const vsse,
                                  __m256i *const vsum) {
  int i;
  *vsum = _mm256_setzero_si256();
  *vsse = _mm256_setzero_si256();

  for (i = 0; i < h; i += 2) {
    variance8_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
    src += 2 * src_stride;
    ref += 2 * ref_stride;
  }
}

static INLINE void variance16_avx2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m256i *const vsse,
                                   __m256i *const vsum) {
  int i;
  *vsum = _mm256_setzero_si256();
  *vsse = _mm256_setzero_si256();

  for (i = 0; i < h; i += 2) {
    variance16_kernel_avx2(src, src_stride, ref, ref_stride, vsse, vsum);
    src += 2 * src_stride;
    ref += 2 * ref_stride;
  }
}

static INLINE void variance32_avx2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m256i *const vsse,
                                   __m256i *const vsum) {
  int i;
  *vsum = _mm256_setzero_si256();
  *vsse = _mm256_setzero_si256();

  for (i = 0; i < h; i++) {
    variance32_kernel_avx2(src, ref, vsse, vsum);
    src += src_stride;
    ref += ref_stride;
  }
}

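// Unlike the helpers above, this only clears *vsum; *vsse keeps accumulating,
// so callers must zero it first. vpx_variance64x64_avx2() relies on this to
// accumulate sse across two 32-row passes.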
static INLINE void variance64_avx2(const uint8_t *src, const int src_stride,
                                   const uint8_t *ref, const int ref_stride,
                                   const int h, __m256i *const vsse,
                                   __m256i *const vsum) {
  int i;
  *vsum = _mm256_setzero_si256();

  for (i = 0; i < h; i++) {
    variance32_kernel_avx2(src + 0, ref + 0, vsse, vsum);
    variance32_kernel_avx2(src + 32, ref + 32, vsse, vsum);
    src += src_stride;
    ref += ref_stride;
  }
}

void vpx_get16x16var_avx2(const uint8_t *src_ptr, int src_stride,
                          const uint8_t *ref_ptr, int ref_stride,
                          unsigned int *sse, int *sum) {
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, sum);
}

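// Apply the bilinear filter to the interleaved byte pairs in exp_src_lo/hi:
// out = (a * tap0 + b * tap1 + 8) >> 4, leaving 16-bit results in place.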
#define FILTER_SRC(filter)                               \
  /* filter the source */                                \
  exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \
  exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \
                                                         \
  /* add 8 to source */                                  \
  exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8);        \
  exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8);        \
                                                         \
  /* divide source by 16 */                              \
  exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4);         \
  exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4);

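// Subtract the 16-bit-expanded dst row from the filtered source and
// accumulate the differences into *sum_reg and their squares into *sse_reg.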
#define CALC_SUM_SSE_INSIDE_LOOP                          \
  /* expand each byte to 2 bytes */                       \
  exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg);   \
  exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg);   \
  /* source - dest */                                     \
  exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo);  \
  exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi);  \
  /* calculate sum */                                     \
  *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_lo);      \
  exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \
  *sum_reg = _mm256_add_epi16(*sum_reg, exp_src_hi);      \
  exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \
  /* calculate sse */                                     \
  *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_lo);      \
  *sse_reg = _mm256_add_epi32(*sse_reg, exp_src_hi);

// final calculation to sum and sse
#define CALC_SUM_AND_SSE                                                   \
  res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg);                         \
  sse_reg_hi = _mm256_srli_si256(sse_reg, 8);                              \
  sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp);                    \
  sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp);                    \
  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
  sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi);                      \
                                                                           \
  sse_reg_hi = _mm256_srli_si256(sse_reg, 4);                              \
  sum_reg_hi = _mm256_srli_si256(sum_reg, 8);                              \
                                                                           \
  sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi);                         \
  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
  *((int *)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) +     \
                  _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \
  sum_reg_hi = _mm256_srli_si256(sum_reg, 4);                              \
  sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi);                         \
  sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) +               \
        _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1));

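// The spv32_* helpers below compute the sum and SSE for a 32-pixel-wide block
// at one fixed sub-pixel case: an offset of 0 needs no filtering, 4 is a
// half-pel average, and any other offset uses the bilinear filter table. When
// do_sec is set, the (filtered) prediction is additionally averaged with
// second_pred.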
static INLINE void spv32_x0_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_reg = _mm256_loadu_si256((__m256i const *)src);
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i avg_reg = _mm256_avg_epu8(src_reg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      second_pred += second_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_reg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

// (x == 0, y == 4) or (x == 4, y == 0). sstep determines the direction.
static INLINE void spv32_half_zero(const uint8_t *src, int src_stride,
                                   const uint8_t *dst, int dst_stride,
                                   const uint8_t *second_pred,
                                   int second_stride, int do_sec, int height,
                                   __m256i *sum_reg, __m256i *sse_reg,
                                   int sstep) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i avg_reg = _mm256_avg_epu8(src_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      second_pred += second_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_avg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_avg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

static INLINE void spv32_x0_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg) {
  spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, sum_reg, sse_reg, src_stride);
}

static INLINE void spv32_x4_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg) {
  spv32_half_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, sum_reg, sse_reg, 1);
}

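// x == 4 and y == 4: half-pel in both directions, implemented with two rounds
// of byte averaging (horizontal average of each row, then average with the
// previous row's horizontal average).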
static INLINE void spv32_x4_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)(src));
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    const __m256i current_avg = _mm256_avg_epu8(prev_src_avg, src_avg);
    // save the current horizontal average for the next row
    prev_src_avg = src_avg;

    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i avg_reg = _mm256_avg_epu8(current_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      second_pred += second_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(current_avg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(current_avg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

// (x == 0, y == bil) or (x == bil, y == 0). sstep determines the direction.
static INLINE void spv32_bilin_zero(const uint8_t *src, int src_stride,
                                    const uint8_t *dst, int dst_stride,
                                    const uint8_t *second_pred,
                                    int second_stride, int do_sec, int height,
                                    __m256i *sum_reg, __m256i *sse_reg,
                                    int offset, int sstep) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (offset << 5)));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + sstep));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(filter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
      second_pred += second_stride;
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src += src_stride;
    dst += dst_stride;
  }
}

static INLINE void spv32_x0_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg, int y_offset) {
  spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
                   do_sec, height, sum_reg, sse_reg, y_offset, src_stride);
}

static INLINE void spv32_xb_y0(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg, int x_offset) {
  spv32_bilin_zero(src, src_stride, dst, dst_stride, second_pred, second_stride,
                   do_sec, height, sum_reg, sse_reg, x_offset, 1);
}

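// x == 4 (horizontal half-pel average), y == bilinear filter.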
static INLINE void spv32_x4_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg, int y_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i prev_src_avg = _mm256_avg_epu8(src_a, src_b);
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  int i;
  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    const __m256i src_avg = _mm256_avg_epu8(src_0, src_1);
    exp_src_lo = _mm256_unpacklo_epi8(prev_src_avg, src_avg);
    exp_src_hi = _mm256_unpackhi_epi8(prev_src_avg, src_avg);
    prev_src_avg = src_avg;

    FILTER_SRC(filter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i exp_src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src_avg, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      second_pred += second_stride;
    }
    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

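// x == bilinear filter, y == 4 (vertical half-pel average).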
static INLINE void spv32_xb_y4(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg, int x_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i filter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  __m256i src_reg, src_pack;
  int i;
  exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
  exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
  FILTER_SRC(filter)
  // pack the filtered 16-bit values back to 8 bits, per 128-bit lane
  src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);

  src += src_stride;
  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(filter)

    src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
    // average the previous filtered row with the current one
    src_pack = _mm256_avg_epu8(src_pack, src_reg);

    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i avg_pack = _mm256_avg_epu8(src_pack, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_pack, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_pack, zero_reg);
      second_pred += second_stride;
    } else {
      exp_src_lo = _mm256_unpacklo_epi8(src_pack, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(src_pack, zero_reg);
    }
    CALC_SUM_SSE_INSIDE_LOOP
    src_pack = src_reg;
    dst += dst_stride;
    src += src_stride;
  }
}

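// x == bilinear filter, y == bilinear filter: filter each row horizontally,
// then filter vertically across consecutive filtered rows.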
static INLINE void spv32_xb_yb(const uint8_t *src, int src_stride,
                               const uint8_t *dst, int dst_stride,
                               const uint8_t *second_pred, int second_stride,
                               int do_sec, int height, __m256i *sum_reg,
                               __m256i *sse_reg, int x_offset, int y_offset) {
  const __m256i zero_reg = _mm256_setzero_si256();
  const __m256i pw8 = _mm256_set1_epi16(8);
  const __m256i xfilter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (x_offset << 5)));
  const __m256i yfilter = _mm256_load_si256(
      (__m256i const *)(bilinear_filters_avx2 + (y_offset << 5)));
  const __m256i src_a = _mm256_loadu_si256((__m256i const *)src);
  const __m256i src_b = _mm256_loadu_si256((__m256i const *)(src + 1));
  __m256i exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi;
  __m256i prev_src_pack, src_pack;
  int i;
  exp_src_lo = _mm256_unpacklo_epi8(src_a, src_b);
  exp_src_hi = _mm256_unpackhi_epi8(src_a, src_b);
  FILTER_SRC(xfilter)
  // pack the filtered 16-bit values back to 8 bits, per 128-bit lane
  prev_src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
  src += src_stride;

  for (i = 0; i < height; i++) {
    const __m256i dst_reg = _mm256_loadu_si256((__m256i const *)dst);
    const __m256i src_0 = _mm256_loadu_si256((__m256i const *)src);
    const __m256i src_1 = _mm256_loadu_si256((__m256i const *)(src + 1));
    exp_src_lo = _mm256_unpacklo_epi8(src_0, src_1);
    exp_src_hi = _mm256_unpackhi_epi8(src_0, src_1);

    FILTER_SRC(xfilter)
    src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi);

    // interleave the previous filtered row with the current one
    exp_src_lo = _mm256_unpacklo_epi8(prev_src_pack, src_pack);
    exp_src_hi = _mm256_unpackhi_epi8(prev_src_pack, src_pack);

    FILTER_SRC(yfilter)
    if (do_sec) {
      const __m256i sec_reg = _mm256_loadu_si256((__m256i const *)second_pred);
      const __m256i exp_src = _mm256_packus_epi16(exp_src_lo, exp_src_hi);
      const __m256i avg_reg = _mm256_avg_epu8(exp_src, sec_reg);
      exp_src_lo = _mm256_unpacklo_epi8(avg_reg, zero_reg);
      exp_src_hi = _mm256_unpackhi_epi8(avg_reg, zero_reg);
      second_pred += second_stride;
    }

    prev_src_pack = src_pack;

    CALC_SUM_SSE_INSIDE_LOOP
    dst += dst_stride;
    src += src_stride;
  }
}

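// Dispatch on (x_offset, y_offset) to the specialized spv32_* helper above,
// then reduce the vector accumulators to a scalar *sse and return the sum.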
static INLINE int sub_pix_var32xh(const uint8_t *src, int src_stride,
                                  int x_offset, int y_offset,
                                  const uint8_t *dst, int dst_stride,
                                  const uint8_t *second_pred, int second_stride,
                                  int do_sec, int height, unsigned int *sse) {
  const __m256i zero_reg = _mm256_setzero_si256();
  __m256i sum_reg = _mm256_setzero_si256();
  __m256i sse_reg = _mm256_setzero_si256();
  __m256i sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi;
  int sum;
  // x_offset = 0 and y_offset = 0
  if (x_offset == 0) {
    if (y_offset == 0) {
      spv32_x0_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg);
      // x_offset = 0 and y_offset = 4
    } else if (y_offset == 4) {
      spv32_x0_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg);
      // x_offset = 0 and y_offset = bilin interpolation
    } else {
      spv32_x0_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg, y_offset);
    }
    // x_offset = 4 and y_offset = 0
  } else if (x_offset == 4) {
    if (y_offset == 0) {
      spv32_x4_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg);
      // x_offset = 4 and y_offset = 4
    } else if (y_offset == 4) {
      spv32_x4_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg);
      // x_offset = 4 and y_offset = bilin interpolation
    } else {
      spv32_x4_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg, y_offset);
    }
    // x_offset = bilin interpolation and y_offset = 0
  } else {
    if (y_offset == 0) {
      spv32_xb_y0(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg, x_offset);
      // x_offset = bilin interpolation and y_offset = 4
    } else if (y_offset == 4) {
      spv32_xb_y4(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg, x_offset);
      // x_offset = bilin interpolation and y_offset = bilin interpolation
    } else {
      spv32_xb_yb(src, src_stride, dst, dst_stride, second_pred, second_stride,
                  do_sec, height, &sum_reg, &sse_reg, x_offset, y_offset);
    }
  }
  CALC_SUM_AND_SSE
  return sum;
}

static int sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride,
                                       int x_offset, int y_offset,
                                       const uint8_t *dst, int dst_stride,
                                       int height, unsigned int *sse) {
  return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
                         NULL, 0, 0, height, sse);
}

static int sub_pixel_avg_variance32xh_avx2(const uint8_t *src, int src_stride,
                                           int x_offset, int y_offset,
                                           const uint8_t *dst, int dst_stride,
                                           const uint8_t *second_pred,
                                           int second_stride, int height,
                                           unsigned int *sse) {
  return sub_pix_var32xh(src, src_stride, x_offset, y_offset, dst, dst_stride,
                         second_pred, second_stride, 1, height, sse);
}

typedef void (*get_var_avx2)(const uint8_t *src_ptr, int src_stride,
                             const uint8_t *ref_ptr, int ref_stride,
                             unsigned int *sse, int *sum);

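// Each wrapper below returns variance = sse - sum^2 / (w * h); the shift in
// the final expression is log2(w * h) for that block size.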
unsigned int vpx_variance8x4_avx2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m256i vsse, vsum;
  int sum;
  variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 4, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 5);
}

unsigned int vpx_variance8x8_avx2(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse) {
  __m256i vsse, vsum;
  int sum;
  variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 6);
}

unsigned int vpx_variance8x16_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   unsigned int *sse) {
  __m256i vsse, vsum;
  int sum;
  variance8_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - ((sum * sum) >> 7);
}

unsigned int vpx_variance16x8_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 7);
}

unsigned int vpx_variance16x16_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 8);
}

unsigned int vpx_variance16x32_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance32x16_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance32x32_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  __m128i vsum_128;
  variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
  vsum_128 = _mm_add_epi16(_mm256_castsi256_si128(vsum),
                           _mm256_extractf128_si256(vsum, 1));
  vsum_128 = _mm_add_epi32(_mm_cvtepi16_epi32(vsum_128),
                           _mm_cvtepi16_epi32(_mm_srli_si128(vsum_128, 8)));
  variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 10);
}

unsigned int vpx_variance32x64_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  __m128i vsum_128;
  variance32_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 64, &vsse, &vsum);
  vsum = sum_to_32bit_avx2(vsum);
  vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
                           _mm256_extractf128_si256(vsum, 1));
  variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}

unsigned int vpx_variance64x32_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m256i vsse = _mm256_setzero_si256();
  __m256i vsum = _mm256_setzero_si256();
  __m128i vsum_128;
  int sum;
  variance64_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 32, &vsse, &vsum);
  vsum = sum_to_32bit_avx2(vsum);
  vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
                           _mm256_extractf128_si256(vsum, 1));
  variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
  return *sse - (uint32_t)(((int64_t)sum * sum) >> 11);
}

unsigned int vpx_variance64x64_avx2(const uint8_t *src_ptr, int src_stride,
                                    const uint8_t *ref_ptr, int ref_stride,
                                    unsigned int *sse) {
  __m256i vsse = _mm256_setzero_si256();
  __m256i vsum = _mm256_setzero_si256();
  __m128i vsum_128;
  int sum;
  int i = 0;

  for (i = 0; i < 2; i++) {
    __m256i vsum16;
    variance64_avx2(src_ptr + 32 * i * src_stride, src_stride,
                    ref_ptr + 32 * i * ref_stride, ref_stride, 32, &vsse,
                    &vsum16);
    vsum = _mm256_add_epi32(vsum, sum_to_32bit_avx2(vsum16));
  }
  vsum_128 = _mm_add_epi32(_mm256_castsi256_si128(vsum),
                           _mm256_extractf128_si256(vsum, 1));
  variance_final_from_32bit_sum_avx2(vsse, vsum_128, sse, &sum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 12);
}

unsigned int vpx_mse16x8_avx2(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride,
                              unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 8, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse;
}

unsigned int vpx_mse16x16_avx2(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               unsigned int *sse) {
  int sum;
  __m256i vsse, vsum;
  variance16_avx2(src_ptr, src_stride, ref_ptr, ref_stride, 16, &vsse, &vsum);
  variance_final_from_16bit_sum_avx2(vsse, vsum, sse, &sum);
  return *sse;
}

unsigned int vpx_sub_pixel_variance64x64_avx2(
    const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
  unsigned int sse1;
  const int se1 = sub_pixel_variance32xh_avx2(
      src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 64, &sse1);
  unsigned int sse2;
  const int se2 =
      sub_pixel_variance32xh_avx2(src_ptr + 32, src_stride, x_offset, y_offset,
                                  ref_ptr + 32, ref_stride, 64, &sse2);
  const int se = se1 + se2;
  *sse = sse1 + sse2;
  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}

unsigned int vpx_sub_pixel_variance32x32_avx2(
    const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse) {
  const int se = sub_pixel_variance32xh_avx2(
      src_ptr, src_stride, x_offset, y_offset, ref_ptr, ref_stride, 32, sse);
  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}

unsigned int vpx_sub_pixel_avg_variance64x64_avx2(
    const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred) {
  unsigned int sse1;
  const int se1 = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
                                                  y_offset, ref_ptr, ref_stride,
                                                  second_pred, 64, 64, &sse1);
  unsigned int sse2;
  const int se2 = sub_pixel_avg_variance32xh_avx2(
      src_ptr + 32, src_stride, x_offset, y_offset, ref_ptr + 32, ref_stride,
      second_pred + 32, 64, 64, &sse2);
  const int se = se1 + se2;

  *sse = sse1 + sse2;

  return *sse - (uint32_t)(((int64_t)se * se) >> 12);
}

unsigned int vpx_sub_pixel_avg_variance32x32_avx2(
    const uint8_t *src_ptr, int src_stride, int x_offset, int y_offset,
    const uint8_t *ref_ptr, int ref_stride, unsigned int *sse,
    const uint8_t *second_pred) {
  // Process 32 elements in parallel.
  const int se = sub_pixel_avg_variance32xh_avx2(src_ptr, src_stride, x_offset,
                                                 y_offset, ref_ptr, ref_stride,
                                                 second_pred, 32, 32, sse);
  return *sse - (uint32_t)(((int64_t)se * se) >> 10);
}