// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MULHI_EPX16 = {"QS8": "_mm_mulhi_epi16", "QU8": "_mm_mulhi_epu16"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vcvt_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  // The conversion is evaluated in Q8.8 fixed point: widen each element to
  // 16 bits, multiply by vmultiplier, combine with vbias in 32 bits,
  // arithmetic-shift right by 8, and repack to 8 bits with saturation.
  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  $if DATATYPE == "QU8":
    const __m128i vzero = _mm_setzero_si128();
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    const __m128i vx${ABC[0]} = _mm_loadu_si128((const __m128i*) x);
    $for N in range(1, SIMD_TILE):
      const __m128i vx${ABC[N]} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
    x += ${BATCH_TILE};

    // Widen the 8-bit elements to 16 bits: interleave with zeroes for QU8
    // (zero extension) or with a computed sign mask for QS8 (sign extension).
    $for N in range(SIMD_TILE):
      $if DATATYPE == "QU8":
        const __m128i vextx${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vzero);
        const __m128i vextx${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vzero);
      $else:
        const __m128i vm${ABC[N]} = _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[N]});
        const __m128i vextx${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vm${ABC[N]});
        const __m128i vextx${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vm${ABC[N]});

    // mullo/mulhi produce the low and high 16 bits of each 16x16-bit product;
    // interleaving them reassembles the full 32-bit products.
    $for N in range(2*SIMD_TILE):
      const __m128i vprodlo${ABC[N]} = _mm_mullo_epi16(vextx${ABC[N]}, vmultiplier);
      const __m128i vprodhi${ABC[N]} = ${_MM_MULHI_EPX16}(vextx${ABC[N]}, vmultiplier);

    $for N in range(2*SIMD_TILE):
      __m128i vacc${ABC[2*N]} = _mm_unpacklo_epi16(vprodlo${ABC[N]}, vprodhi${ABC[N]});
      __m128i vacc${ABC[2*N+1]} = _mm_unpackhi_epi16(vprodlo${ABC[N]}, vprodhi${ABC[N]});

    $for N in range(4*SIMD_TILE):
      $if DATATYPE == "QU8":
        vacc${ABC[N]} = _mm_add_epi32(vacc${ABC[N]}, vbias);
      $else:
        vacc${ABC[N]} = _mm_sub_epi32(vbias, vacc${ABC[N]});

    $for N in range(4*SIMD_TILE):
      vacc${ABC[N]} = _mm_srai_epi32(vacc${ABC[N]}, 8);

    // Pack down to 16 bits with signed saturation, then to 8 bits
    // (signed saturation for QS8, unsigned saturation for QU8).
    $for N in range(2*SIMD_TILE):
      vacc${ABC[N]} = _mm_packs_epi32(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

    $for N in range(SIMD_TILE):
      const __m128i vy${ABC[N]} = ${_MM_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

    _mm_storeu_si128((__m128i*) y, vy${ABC[0]});
    $for N in range(1, SIMD_TILE):
      _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${ABC[N]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
    // Drain remaining full 16-byte blocks one XMM register at a time; this
    // loop is only needed when the main loop processes more than 16 elements.
    for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
      const __m128i vx = _mm_loadu_si128((const __m128i*) x);
      x += 16;

      $if DATATYPE == "QU8":
        const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
        const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
      $else:
        const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
        const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
        const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

      const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
      const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
      const __m128i vprodhi_lo = ${_MM_MULHI_EPX16}(vextx_lo, vmultiplier);
      const __m128i vprodhi_hi = ${_MM_MULHI_EPX16}(vextx_hi, vmultiplier);

      __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
      __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
      __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
      __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

      $if DATATYPE == "QU8":
        vacc_ll = _mm_add_epi32(vacc_ll, vbias);
        vacc_lh = _mm_add_epi32(vacc_lh, vbias);
        vacc_hl = _mm_add_epi32(vacc_hl, vbias);
        vacc_hh = _mm_add_epi32(vacc_hh, vbias);
      $else:
        vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
        vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
        vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
        vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

      vacc_ll = _mm_srai_epi32(vacc_ll, 8);
      vacc_lh = _mm_srai_epi32(vacc_lh, 8);
      vacc_hl = _mm_srai_epi32(vacc_hl, 8);
      vacc_hh = _mm_srai_epi32(vacc_hh, 8);

      const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
      const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

      const __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
      _mm_storeu_si128((__m128i*) y, vy);
      y += 16;
    }
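  // Handle the final 1-15 elements, if any.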
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    // Load a full 16 bytes even though fewer remain; reading past the end of
    // the input is permitted for kernels annotated with XNN_OOB_READS.
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    $if DATATYPE == "QU8":
      const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
      const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
      const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = ${_MM_MULHI_EPX16}(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = ${_MM_MULHI_EPX16}(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    $if DATATYPE == "QU8":
      vacc_ll = _mm_add_epi32(vacc_ll, vbias);
      vacc_lh = _mm_add_epi32(vacc_lh, vbias);
      vacc_hl = _mm_add_epi32(vacc_hl, vbias);
      vacc_hh = _mm_add_epi32(vacc_hh, vbias);
    $else:
      vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
      vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
      vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
      vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
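    // Store the remaining 1-15 bytes with progressively narrower stores,
    // keyed off the individual bits of n: 8, then 4, then 2, then 1 byte.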
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(${XINT8_T}))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) vy_lo;
    }
  }
}