// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

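// Template for ${MR}x4c8 IGEMM microkernels: MR output rows, 4 output
// columns, channels packed in 8-byte cells. SSE selects the minimum ISA
// level and VARIANT selects 64- or 128-bit weight loads.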
$assert SSE in [2, 3, 4]
$assert not XOP or AVX
$assert not AVX or SSE == 4
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128"]
$assert MR <= 4
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  $SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h"}[SSE]
  #include <${SSE_HEADER}>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("sse4" if SSE == 4 and DATATYPE != "QU8" else "sse2")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x4c8__${ISA}_${VARIANT.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

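  // Round kc up to a multiple of 8: the c8 layout packs channels in 8-byte
  // cells, and each K-loop step consumes 8 bytes per row.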
  kc = round_up_po2(kc, 8);
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
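    // Initialize the accumulators from the channel biases stored as the
    // first 4 int32 values of the packed weights; row 0 is broadcast to
    // the remaining rows.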
    $for N in range(4):
      __m128i vacc0x${N} = _mm_cvtsi32_si128(((const int*) w)[${N}]);
    $for M in range(1, MR):
      $for N in range(4):
        __m128i vacc${M}x${N} = vacc0x${N};
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
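      // Fetch the next MR input row pointers from the indirection buffer.
      // The zero pointer marks a padding row and is used as-is, without
      // adding a_offset.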
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

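      // Main K loop: each step widens 8 input bytes per row and an 8x4
      // panel of weights to 16 bits, then multiply-accumulates them into
      // the 32-bit accumulators.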
      size_t k = 0;
      $if DATATYPE == "QU8":
        const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.kernel_zero_point);
        $if SSE < 4 or VARIANT == "LD128":
          const __m128i vzero = _mm_setzero_si128();
      while (k < kc) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} += 8;

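        // Load an 8x4 cell of weights (two columns per 16-byte load for
        // LD128, one column per 8-byte load for LD64) and widen to 16 bits.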
        $if VARIANT == "LD128":
          $for N in range(0, 4, 2):
            $if N == 0:
              const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) w);
            $else:
              const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) ((const ${XINT8_T}*) w + ${N * 8}));
            $if DATATYPE == "QU8":
              const __m128i vxb${N} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${N}${N+1}, vzero), vb_zero_point);
              const __m128i vxb${N+1} = _mm_sub_epi16(_mm_unpackhi_epi8(vb${N}${N+1}, vzero), vb_zero_point);
            $elif SSE == 4:
              const __m128i vxb${N} = _mm_cvtepi8_epi16(vb${N}${N+1});
              const __m128i vxb${N+1} = _mm_srai_epi16(_mm_unpackhi_epi8(vb${N}${N+1}, vb${N}${N+1}), 8);
            $else:
              const __m128i vsb${N}${N+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${N}${N+1});
              const __m128i vxb${N} = _mm_unpacklo_epi8(vb${N}${N+1}, vsb${N}${N+1});
              const __m128i vxb${N+1} = _mm_unpackhi_epi8(vb${N}${N+1}, vsb${N}${N+1});

            $for M in range(MR):
              $if XOP:
                vacc${M}x${N} = _mm_maddd_epi16(vxa${M}, vxb${N}, vacc${M}x${N});
                vacc${M}x${N+1} = _mm_maddd_epi16(vxa${M}, vxb${N+1}, vacc${M}x${N+1});
              $else:
                vacc${M}x${N} = _mm_add_epi32(vacc${M}x${N}, _mm_madd_epi16(vxa${M}, vxb${N}));
                vacc${M}x${N+1} = _mm_add_epi32(vacc${M}x${N+1}, _mm_madd_epi16(vxa${M}, vxb${N+1}));
        $else:
          $for N in range(4):
            $if N == 0:
              const __m128i vb${N} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${N} = _mm_loadl_epi64((const __m128i*) ((const ${XINT8_T}*) w + ${N * 8}));
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb${N} = _mm_sub_epi16(_mm_cvtepu8_epi16(vb${N}), vb_zero_point);
              $else:
                const __m128i vxb${N} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${N}, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb${N} = _mm_cvtepi8_epi16(vb${N});
              $else:
                const __m128i vxb${N} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${N}, vb${N}), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x${N} = _mm_maddd_epi16(vxa${M}, vxb${N}, vacc${M}x${N});
              $else:
                vacc${M}x${N} = _mm_add_epi32(vacc${M}x${N}, _mm_madd_epi16(vxa${M}, vxb${N}));

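        // 4 columns x 8 bytes of weights were consumed this step.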
        w = (const void*) ((const ${XINT8_T}*) w + 32);
        k += 8 * sizeof(${XINT8_T});
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

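    // Reduce each row's four per-column accumulators into one vector, so
    // that lane N holds the dot product for output column N.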
    $if SSE >= 3:
      $for M in range(MR):
        const __m128i vacc${M}x01 = _mm_hadd_epi32(vacc${M}x0, vacc${M}x1);
        const __m128i vacc${M}x23 = _mm_hadd_epi32(vacc${M}x2, vacc${M}x3);

      $for M in range(MR):
        __m128i vacc${M}x0123 = _mm_hadd_epi32(vacc${M}x01, vacc${M}x23);
    $else:
      $for M in range(MR):
        const __m128i vacc${M}x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x0, vacc${M}x2), _mm_unpackhi_epi32(vacc${M}x0, vacc${M}x2));
        const __m128i vacc${M}x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x1, vacc${M}x3), _mm_unpackhi_epi32(vacc${M}x1, vacc${M}x3));

      $for M in range(MR):
        __m128i vacc${M}x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x02, vacc${M}x13), _mm_unpackhi_epi32(vacc${M}x02, vacc${M}x13));

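    // FP32 requantization: convert to float, apply the scale (per channel
    // for QC8, per tensor otherwise), clamp the upper bound in float, and
    // round back to int32.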
    $for M in range(MR):
      __m128 vscaled${M}x0123 = _mm_cvtepi32_ps(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const __m128 vscale0123 = _mm_load_ps((const float*) w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale0123);
    $else:
      const __m128 vscale = _mm_load_ps(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x0123 = _mm_min_ps(vscaled${M}x0123, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x0123 = _mm_cvtps_epi32(vscaled${M}x0123);

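    // Add the output zero point and pack down to 8 bits with saturation;
    // the output_min clamp is applied at 16 or 8 bits depending on the
    // datatype and ISA level.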
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

    $if DATATYPE == "QU8":
      $if MR > 2:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if SSE < 4:
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
        $for M in range(0, MR, 2):
          vacc${M}${min(M+1, MR-1)}x0123 = _mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min);

      $if MR > 2:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      $if SSE == 4:
        vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));

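    // Store the output tile: full 4-byte row stores in the main path,
    // 2- and 1-byte tail stores when fewer than 4 columns remain.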
    if (nc >= 4) {
      $for M in reversed(range(1, MR)):
        $if SSE == 4:
          unaligned_store_u32(c${M}, (uint32_t) _mm_extract_epi32(vout, ${M}));
        $else:
          unaligned_store_u32(c${M}, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(${M}, ${M}, ${M}, ${M}))));
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (${XINT8_T}*) ((uintptr_t) c0 + cn_stride);

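      // Rewind the indirection buffer so the same input rows are reused
      // for the next 4-column tile.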
      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in reversed(range(MR)):
          unaligned_store_u16(c${M}, (uint16_t) _mm_extract_epi16(vout, ${M * 2}));
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE == 4:
          $for M in reversed(range(MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi8(vout, ${M * 4});
        $else:
          $for M in reversed(range(1, MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi16(vout, ${M * 2});
          *c0 = (${XINT8_T}) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}