/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>

#include "aom/aom_integer.h"

// tran_low_t (a 32-bit coefficient type) and AOM_QM_BITS are assumed to be
// provided by AOM headers included by the surrounding translation unit.
static inline void load_b_values(const int16_t *zbin_ptr, __m128i *zbin,
                                 const int16_t *round_ptr, __m128i *round,
                                 const int16_t *quant_ptr, __m128i *quant,
                                 const int16_t *dequant_ptr, __m128i *dequant,
                                 const int16_t *shift_ptr, __m128i *shift) {
  *zbin = _mm_load_si128((const __m128i *)zbin_ptr);
  *round = _mm_load_si128((const __m128i *)round_ptr);
  *quant = _mm_load_si128((const __m128i *)quant_ptr);
  // Subtract 1 so callers can test coeff >= zbin with _mm_cmpgt_epi16(),
  // since SSE2 has no 16-bit "greater than or equal" comparison.
  *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));
  *dequant = _mm_load_si128((const __m128i *)dequant_ptr);
  *shift = _mm_load_si128((const __m128i *)shift_ptr);
}

// With SSSE3 and later, _mm_abs_epi16() and _mm_sign_epi16() are preferred.
static inline __m128i invert_sign_sse2(__m128i a, __m128i sign) {
  a = _mm_xor_si128(a, sign);
  return _mm_sub_epi16(a, sign);
}

static inline __m128i invert_sign_32_sse2(__m128i a, __m128i sign) {
  a = _mm_xor_si128(a, sign);
  return _mm_sub_epi32(a, sign);
}

static inline void calculate_qcoeff(__m128i *coeff, const __m128i round,
                                    const __m128i quant, const __m128i shift) {
  __m128i tmp, qcoeff;
  qcoeff = _mm_adds_epi16(*coeff, round);
  tmp = _mm_mulhi_epi16(qcoeff, quant);
  qcoeff = _mm_add_epi16(tmp, qcoeff);
  *coeff = _mm_mulhi_epi16(qcoeff, shift);
}
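
// Scalar sketch of what calculate_qcoeff() computes per 16-bit lane
// (illustrative only; the saturating add of _mm_adds_epi16() is elided).
// Each _mm_mulhi_epi16() keeps the high 16 bits of a 32-bit product, so a
// lane evaluates (((c + round) + (((c + round) * quant) >> 16)) * shift)
// >> 16, matching the scalar quantize_b formula.
static inline int16_t calculate_qcoeff_scalar_sketch(int16_t c, int16_t round,
                                                     int16_t quant,
                                                     int16_t shift) {
  const int32_t tmp = c + round;
  const int32_t tmp2 = tmp + ((tmp * quant) >> 16);
  return (int16_t)((tmp2 * shift) >> 16);
}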

static inline void calculate_qcoeff_log_scale(__m128i *coeff,
                                              const __m128i round,
                                              const __m128i quant,
                                              const __m128i *shift,
                                              const int *log_scale) {
  __m128i tmp, tmp1, qcoeff;
  qcoeff = _mm_adds_epi16(*coeff, round);
  tmp = _mm_mulhi_epi16(qcoeff, quant);
  qcoeff = _mm_add_epi16(tmp, qcoeff);
  // Emulate (qcoeff * shift) >> (16 - log_scale) per lane: combine the low
  // product bits shifted right with the high product bits shifted left.
  tmp = _mm_mullo_epi16(qcoeff, *shift);
  tmp = _mm_srli_epi16(tmp, (16 - *log_scale));
  tmp1 = _mm_mulhi_epi16(qcoeff, *shift);
  tmp1 = _mm_slli_epi16(tmp1, *log_scale);
  *coeff = _mm_or_si128(tmp, tmp1);
}

static inline __m128i calculate_dqcoeff(__m128i qcoeff, __m128i dequant) {
  return _mm_mullo_epi16(qcoeff, dequant);
}

static inline void calculate_dqcoeff_and_store_log_scale(__m128i qcoeff,
                                                         __m128i dequant,
                                                         const __m128i zero,
                                                         tran_low_t *dqcoeff,
                                                         const int *log_scale) {
  // Take the absolute value so the mullo/mulhi pair below forms a
  // non-negative 32-bit product; the sign is restored after the shift.
  __m128i coeff_sign = _mm_srai_epi16(qcoeff, 15);
  __m128i coeff = invert_sign_sse2(qcoeff, coeff_sign);

  const __m128i sign_0 = _mm_unpacklo_epi16(coeff_sign, zero);
  const __m128i sign_1 = _mm_unpackhi_epi16(coeff_sign, zero);

  const __m128i low = _mm_mullo_epi16(coeff, dequant);
  const __m128i high = _mm_mulhi_epi16(coeff, dequant);
  __m128i dqcoeff32_0 = _mm_unpacklo_epi16(low, high);
  __m128i dqcoeff32_1 = _mm_unpackhi_epi16(low, high);

  dqcoeff32_0 = _mm_srli_epi32(dqcoeff32_0, *log_scale);
  dqcoeff32_1 = _mm_srli_epi32(dqcoeff32_1, *log_scale);

  dqcoeff32_0 = invert_sign_32_sse2(dqcoeff32_0, sign_0);
  dqcoeff32_1 = invert_sign_32_sse2(dqcoeff32_1, sign_1);

  _mm_store_si128((__m128i *)(dqcoeff), dqcoeff32_0);
  _mm_store_si128((__m128i *)(dqcoeff + 4), dqcoeff32_1);
}
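
// Scalar sketch (illustrative only) of the intended per-lane computation
// above: form the product of |qcoeff| and dequant, apply the log_scale
// shift, then restore the sign at 32-bit width.
static inline tran_low_t dqcoeff_log_scale_scalar_sketch(int16_t qcoeff,
                                                         int16_t dequant,
                                                         int log_scale) {
  const int32_t abs_q = qcoeff < 0 ? -qcoeff : qcoeff;
  const int32_t abs_dq = (abs_q * dequant) >> log_scale;
  return qcoeff < 0 ? -abs_dq : abs_dq;
}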

// Scan 16 values for the eob using the indices in scan_ptr. The masks of -1
// produced by the zbin comparison are subtracted, adding 1 to each index so
// that indices become counts.
static inline __m128i scan_for_eob(__m128i *coeff0, __m128i *coeff1,
                                   const __m128i zbin_mask0,
                                   const __m128i zbin_mask1,
                                   const int16_t *scan_ptr, const int index,
                                   const __m128i zero) {
  const __m128i zero_coeff0 = _mm_cmpeq_epi16(*coeff0, zero);
  const __m128i zero_coeff1 = _mm_cmpeq_epi16(*coeff1, zero);
  __m128i scan0 = _mm_load_si128((const __m128i *)(scan_ptr + index));
  __m128i scan1 = _mm_load_si128((const __m128i *)(scan_ptr + index + 8));
  __m128i eob0, eob1;
  // Add one to convert from indices to counts.
  scan0 = _mm_sub_epi16(scan0, zbin_mask0);
  scan1 = _mm_sub_epi16(scan1, zbin_mask1);
  eob0 = _mm_andnot_si128(zero_coeff0, scan0);
  eob1 = _mm_andnot_si128(zero_coeff1, scan1);
  return _mm_max_epi16(eob0, eob1);
}

static inline int16_t accumulate_eob(__m128i eob) {
  // Horizontal maximum of the eight 16-bit lanes.
  __m128i eob_shuffled;
  eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
  eob = _mm_max_epi16(eob, eob_shuffled);
  return _mm_extract_epi16(eob, 1);
}
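
// A minimal usage sketch (illustrative, not part of the AOM sources) of how
// the two eob helpers compose: scan_for_eob() yields per-lane 1-based scan
// indices of the nonzero qcoeffs in one group of 16, and accumulate_eob()
// reduces the running maximum to the final count. The qcoeff0/qcoeff1,
// mask0/mask1 (zbin comparison results), scan, zero, n_coeffs, and eob_ptr
// names are assumed to exist in the caller.
//
//   __m128i eob = _mm_setzero_si128();
//   for (intptr_t i = 0; i < n_coeffs; i += 16) {
//     /* ... quantize qcoeff0/qcoeff1 for coefficients [i, i + 16) ... */
//     const __m128i eob_this = scan_for_eob(&qcoeff0, &qcoeff1, mask0, mask1,
//                                           scan, (int)i, zero);
//     eob = _mm_max_epi16(eob, eob_this);
//   }
//   *eob_ptr = accumulate_eob(eob);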

static inline __m128i load_coefficients(const tran_low_t *coeff_ptr) {
  assert(sizeof(tran_low_t) == 4);
  // Pack eight 32-bit coefficients down to 16 bits with signed saturation.
  const __m128i coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr));
  const __m128i coeff2 = _mm_load_si128((const __m128i *)(coeff_ptr + 4));
  return _mm_packs_epi32(coeff1, coeff2);
}

static inline void store_coefficients(__m128i coeff_vals,
                                      tran_low_t *coeff_ptr) {
  assert(sizeof(tran_low_t) == 4);

  // _mm_mulhi_epi16() with a constant 1 yields 0 or -1 per lane, i.e. the
  // sign extension of each 16-bit value; interleaving it with the low
  // product widens the lanes to sign-extended 32-bit tran_low_t values.
  __m128i one = _mm_set1_epi16(1);
  __m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
  __m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one);
  __m128i coeff_vals_1 = _mm_unpacklo_epi16(coeff_vals_lo, coeff_vals_hi);
  __m128i coeff_vals_2 = _mm_unpackhi_epi16(coeff_vals_lo, coeff_vals_hi);
  _mm_store_si128((__m128i *)(coeff_ptr), coeff_vals_1);
  _mm_store_si128((__m128i *)(coeff_ptr + 4), coeff_vals_2);
}

static inline void update_mask1(__m128i *cmp_mask0, __m128i *cmp_mask1,
                                const int16_t *iscan_ptr, int *is_found,
                                __m128i *mask) {
  __m128i all_zero;
  __m128i temp_mask = _mm_setzero_si128();
  all_zero = _mm_or_si128(*cmp_mask0, *cmp_mask1);
  // Only load the iscan values when at least one comparison mask is set.
  if (_mm_movemask_epi8(all_zero)) {
    __m128i iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
    __m128i mask0 = _mm_and_si128(*cmp_mask0, iscan0);
    __m128i iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
    __m128i mask1 = _mm_and_si128(*cmp_mask1, iscan1);
    temp_mask = _mm_max_epi16(mask0, mask1);
    *is_found = 1;
  }
  *mask = _mm_max_epi16(temp_mask, *mask);
}

static inline void update_mask0(__m128i *qcoeff0, __m128i *qcoeff1,
                                __m128i *threshold, const int16_t *iscan_ptr,
                                int *is_found, __m128i *mask) {
  __m128i zero = _mm_setzero_si128();
  __m128i coeff[4], cmp_mask0, cmp_mask1, cmp_mask2, cmp_mask3;

  coeff[0] = _mm_unpacklo_epi16(*qcoeff0, zero);
  coeff[1] = _mm_unpackhi_epi16(*qcoeff0, zero);
  coeff[2] = _mm_unpacklo_epi16(*qcoeff1, zero);
  coeff[3] = _mm_unpackhi_epi16(*qcoeff1, zero);

  // threshold[0] is expected to hold the DC threshold in its first lane and
  // AC thresholds elsewhere; threshold[1] is all-AC, so the remaining
  // comparisons use threshold[1].
  coeff[0] = _mm_slli_epi32(coeff[0], AOM_QM_BITS);
  cmp_mask0 = _mm_cmpgt_epi32(coeff[0], threshold[0]);
  coeff[1] = _mm_slli_epi32(coeff[1], AOM_QM_BITS);
  cmp_mask1 = _mm_cmpgt_epi32(coeff[1], threshold[1]);
  coeff[2] = _mm_slli_epi32(coeff[2], AOM_QM_BITS);
  cmp_mask2 = _mm_cmpgt_epi32(coeff[2], threshold[1]);
  coeff[3] = _mm_slli_epi32(coeff[3], AOM_QM_BITS);
  cmp_mask3 = _mm_cmpgt_epi32(coeff[3], threshold[1]);

  cmp_mask0 = _mm_packs_epi32(cmp_mask0, cmp_mask1);
  cmp_mask1 = _mm_packs_epi32(cmp_mask2, cmp_mask3);

  update_mask1(&cmp_mask0, &cmp_mask1, iscan_ptr, is_found, mask);
}

static inline int calculate_non_zero_count(__m128i mask) {
  // Horizontal maximum over the eight 16-bit iscan lanes; adding 1 converts
  // the last nonzero scan index into a coefficient count.
  __m128i mask0, mask1;
  int non_zero_count = 0;
  mask0 = _mm_unpackhi_epi64(mask, mask);
  mask1 = _mm_max_epi16(mask0, mask);
  mask0 = _mm_shuffle_epi32(mask1, 1);
  mask0 = _mm_max_epi16(mask0, mask1);
  mask1 = _mm_srli_epi32(mask0, 16);
  mask0 = _mm_max_epi16(mask0, mask1);
  non_zero_count = _mm_extract_epi16(mask0, 0) + 1;

  return non_zero_count;
}
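
// Putting the helpers together: an illustrative sketch (hypothetical
// function, not part of the AOM sources) that quantizes and dequantizes one
// group of eight coefficients in the spirit of aom_quantize_b_sse2(). The
// zbin skip test, the DC-to-AC register rotation for subsequent groups, and
// the eob tracking are omitted for brevity.
static inline void quantize_eight_sketch(
    const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, const int16_t *round_ptr,
    const int16_t *quant_ptr, const int16_t *shift_ptr,
    const int16_t *dequant_ptr) {
  const __m128i round = _mm_load_si128((const __m128i *)round_ptr);
  const __m128i quant = _mm_load_si128((const __m128i *)quant_ptr);
  const __m128i shift = _mm_load_si128((const __m128i *)shift_ptr);
  const __m128i dequant = _mm_load_si128((const __m128i *)dequant_ptr);

  // Quantization operates on magnitudes; save the signs and work on abs().
  __m128i coeff = load_coefficients(coeff_ptr);
  const __m128i coeff_sign = _mm_srai_epi16(coeff, 15);
  __m128i qcoeff = invert_sign_sse2(coeff, coeff_sign);
  calculate_qcoeff(&qcoeff, round, quant, shift);
  qcoeff = invert_sign_sse2(qcoeff, coeff_sign);
  store_coefficients(qcoeff, qcoeff_ptr);

  // The low 16 bits of the product are sign-correct, so the dequantized
  // values can be formed directly from the signed qcoeff.
  store_coefficients(calculate_dqcoeff(qcoeff, dequant), dqcoeff_ptr);
}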