// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert SSE in [2, 4]
$assert not XOP or AVX
$assert not AVX or SSE == 4
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128"]
$assert MR <= 4
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
  #include <${SSE_HEADER}>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("sse4" if SSE == 4 and DATATYPE != "QU8" else "sse2")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x4c2s4__${ISA}_${VARIANT.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(${XINT8_T}));
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    $for M in range(1, MR):
      __m128i vacc${M}x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if DATATYPE == "QU8":
        const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.kernel_zero_point);
        $if SSE < 4 or VARIANT == "LD128":
          const __m128i vzero = _mm_setzero_si128();
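      // The "4c2s4" inner loop: each iteration consumes 8 bytes of A per row,
      // widened to eight int16 values (four k-pairs, one pair per 32-bit lane).
      // B is packed so that each _mm_madd_epi16 accumulates one k-pair per
      // output channel; rotating vxa by one lane between the four multiply-adds
      // cycles every pair through every channel.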
      do {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} += 8;

        $if VARIANT == "LD128":
          $for K in range(0, 4, 2):
            $if K == 0:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) w);
            $else:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}${K+1}, vzero), vb_zero_point);
              const __m128i vxb${K+1} = _mm_sub_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vzero), vb_zero_point);
            $elif SSE == 4:
              const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K}${K+1});
              const __m128i vxb${K+1} = _mm_srai_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vb${K}${K+1}), 8);
            $else:
              const __m128i vsb${K}${K+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${K}${K+1});
              const __m128i vxb${K} = _mm_unpacklo_epi8(vb${K}${K+1}, vsb${K}${K+1});
              const __m128i vxb${K+1} = _mm_unpackhi_epi8(vb${K}${K+1}, vsb${K}${K+1});

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(vxa${M}, vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123, _mm_madd_epi16(vxa${M}, vxb${K}));
              vxa${M} = _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 3, 2, 1));

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(vxa${M}, vxb${K+1}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123, _mm_madd_epi16(vxa${M}, vxb${K+1}));
              $if K + 2 != 4:
                vxa${M} = _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 3, 2, 1));
        $else:
          $for K in range(4):
            $if K == 0:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_cvtepu8_epi16(vb${K}), vb_zero_point);
              $else:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K});
              $else:
                const __m128i vxb${K} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${K}, vb${K}), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(vxa${M}, vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123, _mm_madd_epi16(vxa${M}, vxb${K}));
              $if K + 1 != 4:
                vxa${M} = _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 3, 2, 1));

        w = (const void*) ((const ${XINT8_T}*) w + 32);
        k -= 8 * sizeof(${XINT8_T});
      } while (k != 0);
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

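    // FP32 requantization: scale the int32 accumulators in the float domain
    // (per-channel scales read from the packed weights for QC8, a per-tensor
    // scale from params otherwise), clamp against output_max there, then
    // convert back with rounding.  The output zero point and the output_min
    // clamp are applied during the narrowing to 8 bits below.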
    $for M in range(MR):
      __m128 vscaled${M}x0123 = _mm_cvtepi32_ps(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale0123);
    $else:
      const __m128 vscale = _mm_load_ps(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x0123 = _mm_min_ps(vscaled${M}x0123, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x0123 = _mm_cvtps_epi32(vscaled${M}x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

    $if DATATYPE == "QU8":
      $if MR > 2:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if SSE < 4:
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
        $for M in range(0, MR, 2):
          vacc${M}${min(M+1, MR-1)}x0123 = _mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min);

      $if MR > 2:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      $if SSE == 4:
        vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));

    if (nc >= 4) {
      $for M in reversed(range(1, MR)):
        $if SSE == 4:
          unaligned_store_u32(c${M}, (uint32_t) _mm_extract_epi32(vout, ${M}));
        $else:
          unaligned_store_u32(c${M}, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(${M}, ${M}, ${M}, ${M}))));
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (${XINT8_T}*) ((uintptr_t) c0 + cn_stride);

      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in reversed(range(MR)):
          unaligned_store_u16(c${M}, (uint16_t) _mm_extract_epi16(vout, ${M * 2}));
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE == 4:
          $for M in reversed(range(MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi8(vout, ${M * 4});
        $else:
          $for M in reversed(range(1, MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi16(vout, ${M * 2});
          *c0 = (${XINT8_T}) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}