// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert SSE in [2, 4]
$assert not AVX or SSE == 4
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>


$PARAMS_STRUCT = "sse4_mul16" if SSE == 4 and DATATYPE == "QS8" else "sse2"
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_CVTEPX8_EPI16 = {"QS8": "_mm_cvtepi8_epi16", "QU8": "_mm_cvtepu8_epi16"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MIN_EPX8 = {"QS8": "_mm_min_epi8", "QU8": "_mm_min_epu8"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  // vaddc: input_b points to a single scalar addend, so its scaled contribution
  // is folded into the bias once, outside the loop.
  const __m128i vbias = _mm_add_epi32(
    _mm_shuffle_epi32(_mm_cvtsi32_si128(params->${PARAMS_STRUCT}.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
    _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.bias));
  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.a_multiplier_lo);
  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.a_multiplier_hi);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->${PARAMS_STRUCT}.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);

  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    $if SSE == 4:
      const __m128i va${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
      $for N in range(8, BATCH_TILE, 8):
        const __m128i va${ABC[N:N+8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) (input_a + ${N})));
    $else:
      __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
      $for N in range(8, BATCH_TILE, 8):
        __m128i va${ABC[N:N+8]} = _mm_loadl_epi64((const __m128i*) (input_a + ${N}));
    input_a += ${BATCH_TILE};

    $if SSE < 4:
      $if DATATYPE == "QU8":
        const __m128i vzero = _mm_setzero_si128();
        $for N in range(0, BATCH_TILE, 8):
          va${ABC[N:N+8]} = _mm_unpacklo_epi8(va${ABC[N:N+8]}, vzero);
      $else:
        $for N in range(0, BATCH_TILE, 8):
          va${ABC[N:N+8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[N:N+8]}, va${ABC[N:N+8]}), 8);

    // mul16: apply the 32-bit a_multiplier in 16-bit halves. _mm_mullo_epi16
    // produces the low 16 bits of each 32-bit product, _mm_mulhi_epu16 the
    // (unsigned) high 16 bits.
    $for N in range(0, BATCH_TILE, 8):
      __m128i vaprod${ABC[N:N+8]}hi = _mm_mulhi_epu16(va${ABC[N:N+8]}, va_multiplier_lo);
      const __m128i vaprod${ABC[N:N+8]}lo = _mm_mullo_epi16(va${ABC[N:N+8]}, va_multiplier_lo);

    $for N in range(0, BATCH_TILE, 8):
      vaprod${ABC[N:N+8]}hi = _mm_add_epi16(vaprod${ABC[N:N+8]}hi, _mm_mullo_epi16(va${ABC[N:N+8]}, va_multiplier_hi));

    $if DATATYPE == "QS8":
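      // Correct the unsigned high product from _mm_mulhi_epu16 to the signed
      // high product: subtract a_multiplier_lo in the lanes where va is
      // negative (_mm_srai_epi16(va, 15) is all-ones in exactly those lanes).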
      $for N in range(0, BATCH_TILE, 8):
        vaprod${ABC[N:N+8]}hi = _mm_sub_epi16(vaprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[N:N+8]}, 15), va_multiplier_lo));

    // Interleave the low/high product halves into 32-bit values and add the bias.
    $for N in range(0, BATCH_TILE, 8):
      __m128i vacc${ABC[N:N+4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[N:N+8]}lo, vaprod${ABC[N:N+8]}hi));
      __m128i vacc${ABC[N+4:N+8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod${ABC[N:N+8]}lo, vaprod${ABC[N:N+8]}hi));

    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = _mm_sra_epi32(vacc${ABC[N:N+4]}, vshift);

    // Requantize: saturating-pack the 32-bit accumulators to 16 bits and add
    // the output zero point with a saturating 16-bit addition.
    $for N in range(0, BATCH_TILE, 8):
      __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);

    $if DATATYPE == "QS8" and SSE < 4:
      $for N in range(0, BATCH_TILE, 8):
        vout${ABC[N:N+8]} = _mm_max_epi16(vout${ABC[N:N+8]}, voutput_min);

      $for N in range(0, BATCH_TILE, 8):
        vout${ABC[N:N+8]} = _mm_min_epi16(vout${ABC[N:N+8]}, voutput_max);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        __m128i vout${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
      $else:
        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

    $if DATATYPE == "QU8" or SSE == 4:
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, voutput_min);
        $else:
          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, voutput_max);
        $else:
          vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);

    $if BATCH_TILE >= 16:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
    output += ${BATCH_TILE};
  }
  // Remainder: fewer than ${BATCH_TILE} elements left.
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      $if SSE == 4:
        const __m128i va${ABC[0:8]} = ${_MM_CVTEPX8_EPI16}(_mm_loadl_epi64((const __m128i*) input_a));
      $else:
        __m128i va${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_a);
      $if BATCH_TILE > 8:
        input_a += 8;

      $if SSE < 4:
        $if DATATYPE == "QU8":
          va${ABC[0:8]} = _mm_unpacklo_epi8(va${ABC[0:8]}, _mm_setzero_si128());
        $else:
          va${ABC[0:8]} = _mm_srai_epi16(_mm_unpacklo_epi8(va${ABC[0:8]}, va${ABC[0:8]}), 8);

      __m128i vaprod${ABC[0:8]}hi = _mm_mulhi_epu16(va${ABC[0:8]}, va_multiplier_lo);
      const __m128i vaprod${ABC[0:8]}lo = _mm_mullo_epi16(va${ABC[0:8]}, va_multiplier_lo);

      vaprod${ABC[0:8]}hi = _mm_add_epi16(vaprod${ABC[0:8]}hi, _mm_mullo_epi16(va${ABC[0:8]}, va_multiplier_hi));

      $if DATATYPE == "QS8":
        vaprod${ABC[0:8]}hi = _mm_sub_epi16(vaprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(va${ABC[0:8]}, 15), va_multiplier_lo));

      __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));
      __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod${ABC[0:8]}lo, vaprod${ABC[0:8]}hi));

      vacc${ABC[0:4]} = _mm_sra_epi32(vacc${ABC[0:4]}, vshift);
      vacc${ABC[4:8]} = _mm_sra_epi32(vacc${ABC[4:8]}, vshift);

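      // Requantize the remainder the same way: saturating-pack to 16 bits and
      // add the output zero point.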
      __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
      $if DATATYPE == "QS8" and SSE < 4:
        vout${ABC[0:8]} = _mm_max_epi16(vout${ABC[0:8]}, voutput_min);
        vout${ABC[0:8]} = _mm_min_epi16(vout${ABC[0:8]}, voutput_max);

      __m128i vout${ABC[0:8]}${ABC[0:8]} = ${_MM_PACKXS_EPI16}(vout${ABC[0:8]}, vout${ABC[0:8]});
      $if DATATYPE == "QU8" or SSE == 4:
        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MAX_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
        vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MIN_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);

      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          n -= 8 * sizeof(${XINT8_T});
        } else {
          // Store the 4-, 2-, and 1-byte tails from the low lanes, shifting the
          // vector down after each partial store.
          if (n & (4 * sizeof(${XINT8_T}))) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (n & (2 * sizeof(${XINT8_T}))) {
            $if SSE == 4:
              unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
            $else:
              unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (n & (1 * sizeof(${XINT8_T}))) {
            $if SSE == 4:
              *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            $else:
              *output = (${XINT8_T}) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
          }
          n = 0;
        }
      $else:
        // Store the 4-, 2-, and 1-byte tails from the low lanes, shifting the
        // vector down after each partial store.
        if (n & (4 * sizeof(${XINT8_T}))) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (n & (2 * sizeof(${XINT8_T}))) {
          $if SSE == 4:
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
          $else:
            unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (n & (1 * sizeof(${XINT8_T}))) {
          $if SSE == 4:
            *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
          $else:
            *output = (${XINT8_T}) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}