/*
 * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <immintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_scan.h"
#include "vp9/encoder/vp9_block.h"

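// AVX2 implementations of vpx_quantize_b() and vpx_quantize_b_32x32(),
// processing 16 coefficients per iteration.
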
static VPX_FORCE_INLINE void load_b_values_avx2(
    const struct macroblock_plane *mb_plane, __m256i *zbin, __m256i *round,
    __m256i *quant, const int16_t *dequant_ptr, __m256i *dequant,
    __m256i *shift, int log_scale) {
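  // Each quantizer array holds the DC value followed by seven copies of the
  // AC value, so a 128-bit load gives {dc, ac, ac, ..., ac}. The 0x54 qword
  // permute (0, 1, 1, 1) widens that to 16 lanes laid out as the DC + 15 AC
  // parameters needed for the first block of coefficients.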
  *zbin =
      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)mb_plane->zbin));
  *zbin = _mm256_permute4x64_epi64(*zbin, 0x54);
  if (log_scale > 0) {
    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
    *zbin = _mm256_add_epi16(*zbin, rnd);
    *zbin = _mm256_srai_epi16(*zbin, log_scale);
  }
  // Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
  // calculating the zbin mask. (See quantize_b_logscale{0,1,2}_16)
  *zbin = _mm256_sub_epi16(*zbin, _mm256_set1_epi16(1));

  *round =
      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)mb_plane->round));
  *round = _mm256_permute4x64_epi64(*round, 0x54);
  if (log_scale > 0) {
    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
    *round = _mm256_add_epi16(*round, rnd);
    *round = _mm256_srai_epi16(*round, log_scale);
  }

  *quant =
      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)mb_plane->quant));
  *quant = _mm256_permute4x64_epi64(*quant, 0x54);
  *dequant =
      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)dequant_ptr));
  *dequant = _mm256_permute4x64_epi64(*dequant, 0x54);
  *shift = _mm256_castsi128_si256(
      _mm_load_si128((const __m128i *)mb_plane->quant_shift));
  *shift = _mm256_permute4x64_epi64(*shift, 0x54);
}

static VPX_FORCE_INLINE __m256i
load_coefficients_avx2(const tran_low_t *coeff_ptr) {
#if CONFIG_VP9_HIGHBITDEPTH
  // typedef int32_t tran_low_t;
  const __m256i coeff1 = _mm256_loadu_si256((const __m256i *)coeff_ptr);
  const __m256i coeff2 = _mm256_loadu_si256((const __m256i *)(coeff_ptr + 8));
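  // Note: _mm256_packs_epi32() interleaves its sources per 128-bit lane, so
  // the packed int16 result holds the coefficients in the order 0-3, 8-11,
  // 4-7, 12-15. store_coefficients_avx2() and the iscan permute in
  // get_max_lane_eob() account for this ordering.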
  return _mm256_packs_epi32(coeff1, coeff2);
#else
  // typedef int16_t tran_low_t;
  return _mm256_loadu_si256((const __m256i *)coeff_ptr);
#endif
}

static VPX_FORCE_INLINE void store_coefficients_avx2(__m256i coeff_vals,
                                                     tran_low_t *coeff_ptr) {
#if CONFIG_VP9_HIGHBITDEPTH
  // typedef int32_t tran_low_t;
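  // Sign-extend the packed int16 values to int32. The per-lane unpacks also
  // undo the cross-lane ordering introduced by load_coefficients_avx2(), so
  // the coefficients are written back in linear order.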
  __m256i coeff_sign = _mm256_srai_epi16(coeff_vals, 15);
  __m256i coeff_vals_lo = _mm256_unpacklo_epi16(coeff_vals, coeff_sign);
  __m256i coeff_vals_hi = _mm256_unpackhi_epi16(coeff_vals, coeff_sign);
  _mm256_storeu_si256((__m256i *)coeff_ptr, coeff_vals_lo);
  _mm256_storeu_si256((__m256i *)(coeff_ptr + 8), coeff_vals_hi);
#else
  // typedef int16_t tran_low_t;
  _mm256_storeu_si256((__m256i *)coeff_ptr, coeff_vals);
#endif
}

static VPX_FORCE_INLINE __m256i
quantize_b_16(const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
              tran_low_t *dqcoeff_ptr, __m256i *v_quant, __m256i *v_dequant,
              __m256i *v_round, __m256i *v_zbin, __m256i *v_quant_shift) {
  const __m256i v_coeff = load_coefficients_avx2(coeff_ptr);
  const __m256i v_abs_coeff = _mm256_abs_epi16(v_coeff);
  const __m256i v_zbin_mask = _mm256_cmpgt_epi16(v_abs_coeff, *v_zbin);

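  // All 16 coefficients fall inside the zero bin: zero the outputs and skip
  // the multiplies.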
  if (_mm256_movemask_epi8(v_zbin_mask) == 0) {
    _mm256_storeu_si256((__m256i *)qcoeff_ptr, _mm256_setzero_si256());
    _mm256_storeu_si256((__m256i *)dqcoeff_ptr, _mm256_setzero_si256());
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), _mm256_setzero_si256());
#endif  // CONFIG_VP9_HIGHBITDEPTH
    return _mm256_setzero_si256();
  }
  {
    // tmp = v_zbin_mask ? (int64_t)abs_coeff + log_scaled_round : 0
    const __m256i v_tmp_rnd =
        _mm256_and_si256(_mm256_adds_epi16(v_abs_coeff, *v_round), v_zbin_mask);

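    // tmp32 = (int)(((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
    //                quant_shift_ptr[rc != 0]) >> 16);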
    const __m256i v_tmp32_a = _mm256_mulhi_epi16(v_tmp_rnd, *v_quant);
    const __m256i v_tmp32_b = _mm256_add_epi16(v_tmp32_a, v_tmp_rnd);
    const __m256i v_tmp32 = _mm256_mulhi_epi16(v_tmp32_b, *v_quant_shift);
    const __m256i v_nz_mask =
        _mm256_cmpgt_epi16(v_tmp32, _mm256_setzero_si256());
    const __m256i v_qcoeff = _mm256_sign_epi16(v_tmp32, v_coeff);
#if CONFIG_VP9_HIGHBITDEPTH
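    // Widen qcoeff * dequant to 32 bits by recombining the low and high
    // 16-bit halves of the product.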
    const __m256i low = _mm256_mullo_epi16(v_qcoeff, *v_dequant);
    const __m256i high = _mm256_mulhi_epi16(v_qcoeff, *v_dequant);

    const __m256i v_dqcoeff_lo = _mm256_unpacklo_epi16(low, high);
    const __m256i v_dqcoeff_hi = _mm256_unpackhi_epi16(low, high);
#else
    const __m256i v_dqcoeff = _mm256_mullo_epi16(v_qcoeff, *v_dequant);
#endif

    store_coefficients_avx2(v_qcoeff, qcoeff_ptr);
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr), v_dqcoeff_lo);
    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr + 8), v_dqcoeff_hi);
#else
    store_coefficients_avx2(v_dqcoeff, dqcoeff_ptr);
#endif
    return v_nz_mask;
  }
}

static VPX_FORCE_INLINE __m256i get_max_lane_eob(const int16_t *iscan,
                                                 __m256i v_eobmax,
                                                 __m256i v_mask) {
#if CONFIG_VP9_HIGHBITDEPTH
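  // v_mask is in the cross-lane order left by _mm256_packs_epi32() (0-3,
  // 8-11, 4-7, 12-15), so permute the scan indices into the same order.
  // 0xD8 selects qwords (0, 2, 1, 3), swapping the two middle qwords.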
  const __m256i v_iscan = _mm256_permute4x64_epi64(
      _mm256_loadu_si256((const __m256i *)iscan), 0xD8);
#else
  const __m256i v_iscan = _mm256_loadu_si256((const __m256i *)iscan);
#endif
  const __m256i v_nz_iscan = _mm256_and_si256(v_iscan, v_mask);
  return _mm256_max_epi16(v_eobmax, v_nz_iscan);
}

static VPX_FORCE_INLINE int16_t accumulate_eob256(__m256i eob256) {
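  // Horizontal max of the 16 int16 lanes: fold the two 128-bit halves
  // together, then keep halving the active width with shuffles until a
  // single lane holds the overall maximum.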
  const __m128i eob_lo = _mm256_castsi256_si128(eob256);
  const __m128i eob_hi = _mm256_extractf128_si256(eob256, 1);
  __m128i eob = _mm_max_epi16(eob_lo, eob_hi);
  __m128i eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
  eob = _mm_max_epi16(eob, eob_shuffled);
  return _mm_extract_epi16(eob, 1);
}

void vpx_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                         const struct macroblock_plane *const mb_plane,
                         tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                         const int16_t *dequant_ptr, uint16_t *eob_ptr,
                         const struct ScanOrder *const scan_order) {
  __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift, v_nz_mask;
  __m256i v_eobmax = _mm256_setzero_si256();
  intptr_t count;
  const int16_t *iscan = scan_order->iscan;

  load_b_values_avx2(mb_plane, &v_zbin, &v_round, &v_quant, dequant_ptr,
                     &v_dequant, &v_quant_shift, 0);
  // Do DC and first 15 AC.
  v_nz_mask = quantize_b_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant,
                            &v_dequant, &v_round, &v_zbin, &v_quant_shift);

  v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);

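  // Broadcast the AC entry (the high qword of each lane) across the whole
  // register so the remaining blocks use AC parameters in every lane.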
  v_round = _mm256_unpackhi_epi64(v_round, v_round);
  v_quant = _mm256_unpackhi_epi64(v_quant, v_quant);
  v_dequant = _mm256_unpackhi_epi64(v_dequant, v_dequant);
  v_quant_shift = _mm256_unpackhi_epi64(v_quant_shift, v_quant_shift);
  v_zbin = _mm256_unpackhi_epi64(v_zbin, v_zbin);

  for (count = n_coeffs - 16; count > 0; count -= 16) {
    coeff_ptr += 16;
    qcoeff_ptr += 16;
    dqcoeff_ptr += 16;
    iscan += 16;
    v_nz_mask = quantize_b_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant,
                              &v_dequant, &v_round, &v_zbin, &v_quant_shift);

    v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);
  }

  *eob_ptr = accumulate_eob256(v_eobmax);
}

static VPX_FORCE_INLINE __m256i quantize_b_32x32_16(
    const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, const int16_t *iscan, __m256i *v_quant,
    __m256i *v_dequant, __m256i *v_round, __m256i *v_zbin,
    __m256i *v_quant_shift, __m256i *v_eobmax) {
  const __m256i v_coeff = load_coefficients_avx2(coeff_ptr);
  const __m256i v_abs_coeff = _mm256_abs_epi16(v_coeff);
  const __m256i v_zbin_mask = _mm256_cmpgt_epi16(v_abs_coeff, *v_zbin);

  if (_mm256_movemask_epi8(v_zbin_mask) == 0) {
    _mm256_store_si256((__m256i *)qcoeff_ptr, _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)dqcoeff_ptr, _mm256_setzero_si256());
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), _mm256_setzero_si256());
#endif
    return *v_eobmax;
  }
  {
    // tmp = v_zbin_mask ? (int64_t)abs_coeff + round : 0
    const __m256i v_tmp_rnd =
        _mm256_and_si256(_mm256_adds_epi16(v_abs_coeff, *v_round), v_zbin_mask);
    // tmp32 = (int)(((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
    //               quant_shift_ptr[rc != 0]) >> 15);
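    // The >> 15 of the 32-bit product is assembled from its 16-bit halves:
    // (mulhi << 1) | (mullo >> 15).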
    const __m256i v_tmp32_a = _mm256_mulhi_epi16(v_tmp_rnd, *v_quant);
    const __m256i v_tmp32_b = _mm256_add_epi16(v_tmp32_a, v_tmp_rnd);
    const __m256i v_tmp32_hi =
        _mm256_slli_epi16(_mm256_mulhi_epi16(v_tmp32_b, *v_quant_shift), 1);
    const __m256i v_tmp32_lo =
        _mm256_srli_epi16(_mm256_mullo_epi16(v_tmp32_b, *v_quant_shift), 15);
    const __m256i v_tmp32 = _mm256_or_si256(v_tmp32_hi, v_tmp32_lo);
    const __m256i v_qcoeff = _mm256_sign_epi16(v_tmp32, v_coeff);
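    // dqcoeff = (qcoeff * dequant) / 2 for 32x32 (log_scale == 1). Form the
    // 32-bit product of the magnitudes, shift right by one, then reapply the
    // sign so the result truncates toward zero like the scalar division.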
    const __m256i v_sign_lo =
        _mm256_unpacklo_epi16(_mm256_setzero_si256(), v_coeff);
    const __m256i v_sign_hi =
        _mm256_unpackhi_epi16(_mm256_setzero_si256(), v_coeff);
    const __m256i low = _mm256_mullo_epi16(v_tmp32, *v_dequant);
    const __m256i high = _mm256_mulhi_epi16(v_tmp32, *v_dequant);
    const __m256i v_dqcoeff_lo = _mm256_sign_epi32(
        _mm256_srli_epi32(_mm256_unpacklo_epi16(low, high), 1), v_sign_lo);
    const __m256i v_dqcoeff_hi = _mm256_sign_epi32(
        _mm256_srli_epi32(_mm256_unpackhi_epi16(low, high), 1), v_sign_hi);
    const __m256i v_nz_mask =
        _mm256_cmpgt_epi16(v_tmp32, _mm256_setzero_si256());

    store_coefficients_avx2(v_qcoeff, qcoeff_ptr);

#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr), v_dqcoeff_lo);
    _mm256_storeu_si256((__m256i *)(dqcoeff_ptr + 8), v_dqcoeff_hi);
#else
    store_coefficients_avx2(_mm256_packs_epi32(v_dqcoeff_lo, v_dqcoeff_hi),
                            dqcoeff_ptr);
#endif

    return get_max_lane_eob(iscan, *v_eobmax, v_nz_mask);
  }
}

void vpx_quantize_b_32x32_avx2(const tran_low_t *coeff_ptr,
                               const struct macroblock_plane *const mb_plane,
                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
                               const struct ScanOrder *const scan_order) {
  __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift;
  __m256i v_eobmax = _mm256_setzero_si256();
  intptr_t count;
  const int16_t *iscan = scan_order->iscan;

  load_b_values_avx2(mb_plane, &v_zbin, &v_round, &v_quant, dequant_ptr,
                     &v_dequant, &v_quant_shift, 1);

  // Do DC and first 15 AC.
  v_eobmax = quantize_b_32x32_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, iscan,
                                 &v_quant, &v_dequant, &v_round, &v_zbin,
                                 &v_quant_shift, &v_eobmax);

  v_round = _mm256_unpackhi_epi64(v_round, v_round);
  v_quant = _mm256_unpackhi_epi64(v_quant, v_quant);
  v_dequant = _mm256_unpackhi_epi64(v_dequant, v_dequant);
  v_quant_shift = _mm256_unpackhi_epi64(v_quant_shift, v_quant_shift);
  v_zbin = _mm256_unpackhi_epi64(v_zbin, v_zbin);

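  // AC-only loop over the remaining 32 * 32 - 16 = 1008 coefficients.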
  for (count = (32 * 32) - 16; count > 0; count -= 16) {
    coeff_ptr += 16;
    qcoeff_ptr += 16;
    dqcoeff_ptr += 16;
    iscan += 16;
    v_eobmax = quantize_b_32x32_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, iscan,
                                   &v_quant, &v_dequant, &v_round, &v_zbin,
                                   &v_quant_shift, &v_eobmax);
  }

  *eob_ptr = accumulate_eob256(v_eobmax);
}