// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <tmmintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>

void xnn_qu8_requantize_gemmlowp__ssse3(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  // Compute requantization parameters.
  const uint32_t scale_bits = float_as_uint32(scale);

  // Multiplier is in [0x40000000, 0x7FFFFF80] range.
  const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
  assert(multiplier >= INT32_C(0x40000000));
  assert(multiplier <= INT32_C(0x7FFFFF80));

  // Shift is in [0, 31] range.
  const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
  assert(shift >= 0);
  assert(shift < 32);
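
  // Together, multiplier and shift decompose the positive scale as
  //   scale = multiplier * 2**(-31 - shift)
  // with multiplier the Q31 fixed-point image of the float significand.
  // Each lane is then requantized, in exact integer arithmetic, roughly as in
  // this reference-only scalar sketch (clamp() is a hypothetical helper, not
  // part of this file):
  //   const int64_t product = (int64_t) x * (int64_t) multiplier;
  //   const int32_t q31 = (int32_t) (uint32_t) ((product + (INT64_C(1) << 30)) >> 31);
  //   const int32_t rem = (q31 & (int32_t) remainder_mask) - (int32_t) (q31 < 0);
  //   const int32_t scaled = (q31 >> shift) + (int32_t) (rem > (int32_t) (remainder_mask >> 1));
  //   const uint8_t y = clamp(scaled + (int32_t) zero_point, qmin, qmax);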

  const __m128i vmultiplier = _mm_set1_epi32(multiplier);
  const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
  const __m128i vqmin = _mm_set1_epi8((char) qmin);
  const __m128i vqmax = _mm_set1_epi8((char) qmax);
  const __m128i vshift = _mm_cvtsi32_si128((int) shift);
  const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
  const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
  const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
  const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
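
  // vq31rounding is the 2**30 constant that rounds the 64-bit products before
  // the shift down by 31; vremainder_mask and vthreshold drive the rounding
  // right shift by 'shift' at the end of the loop body.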
  for (; n != 0; n -= 16) {
    const __m128i x = _mm_loadu_si128((const __m128i*) input);
    const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
    const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
    const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
    input += 16;

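    // PMULUDQ computes an unsigned 32x32->64-bit product. To get signed
    // products, multiply the absolute values and remember which inputs were
    // negative so the sign can be restored afterwards.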
    const __m128i x_abs = _mm_abs_epi32(x);
    const __m128i y_abs = _mm_abs_epi32(y);
    const __m128i z_abs = _mm_abs_epi32(z);
    const __m128i w_abs = _mm_abs_epi32(w);

    const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
    const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
    const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
    const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);

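    // PMULUDQ reads only the even (0 and 2) 32-bit lanes, so swap adjacent
    // lanes to bring the odd elements into even positions for a second set of
    // multiplies.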
    const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));

    const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
    const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
    const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
    const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);

    const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));

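    // Restore the product signs: with an all-ones mask, (p ^ mask) - mask is
    // the two's-complement negation of p, while a zero mask leaves p intact.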
    const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
    const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
    const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
    const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);

    const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
    const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
    const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
    const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);

    const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
    const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
    const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
    const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);

    const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
    const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
    const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
    const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);

    const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
    const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
    const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
    const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);

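    // After adding the Q31 rounding constant, shift the 64-bit products right
    // by 31. Only the low 32 bits of each result are kept below, so a logical
    // shift (PSRLQ) is sufficient.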
    const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
    const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
    const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
    const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
    const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
    const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
    const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
    const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);

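    // SHUFPS with selector (2, 0, 2, 0) gathers the low dwords of the even and
    // odd 64-bit results into one vector in 0-2-1-3 element order; the PSHUFD
    // that follows restores the natural 0-1-2-3 order.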
    const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));

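    // Rounding right shift in the gemmlowp style: take the bits shifted out
    // (the remainder, biased by -1 for negative inputs), compare them against
    // half the shift range, and add 1 to the arithmetically shifted value when
    // they exceed it. This rounds to nearest with ties away from zero.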
    const __m128i x_remainder =
        _mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
    const __m128i y_remainder =
        _mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
    const __m128i z_remainder =
        _mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
    const __m128i w_remainder =
        _mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));

    const __m128i x_scaled =
        _mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
    const __m128i y_scaled =
        _mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
    const __m128i z_scaled =
        _mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
    const __m128i w_scaled =
        _mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));

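    // Pack down to 8 bits: saturate to int16, add the zero point with signed
    // saturation, saturate to uint8, then clamp to the [qmin, qmax] range.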
    const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
    const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
    const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
    const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);

    // 16x PSHUFD
    // 4x SHUFPS
    // 8x PMULUDQ
    // 8x PXOR (setzero)
    // 8x PXOR
    // 4x PAND
    // 8x PADDQ
    // 4x PADDD
    // 2x PADDSW
    // 8x PSUBQ
    // 4x PSUBD
    // 8x PSRLQ (immediate)
    // 4x PSRAD (register)
    // 12x PCMPGTD
    // 4x PABSD
    // 2x PACKSSDW
    // 1x PACKUSWB
    // 1x PMAXUB
    // 1x PMINUB
    // ---------------------
    // 107 instructions total

    _mm_storeu_si128((__m128i*) output, xyzw_clamped);
    output += 16;
  }
}