// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vcvt_ukernel__ssse3_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
  $if DATATYPE == "QU8":
    const __m128i vzero = _mm_setzero_si128();
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      const __m128i vx${ABC[0]} = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        const __m128i vx${ABC[N]} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};

      // Widen the 8-bit elements to 16 bits (zero-extend for QU8, sign-extend for QS8).
      $for N in range(SIMD_TILE):
        $if DATATYPE == "QU8":
          __m128i vacc${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vzero);
          __m128i vacc${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vzero);
        $else:
          const __m128i vm${ABC[N]} = _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[N]});
          __m128i vacc${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vm${ABC[N]});
          __m128i vacc${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vm${ABC[N]});

      // Subtract from the input zero point: vacc := input_zero_point - x.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_sub_epi16(vinput_zero_point, vacc${ABC[N]});

      // Shift left by 7 so that, combined with the Q0.15 multiply below, the net scaling is multiplier / 2**8.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_slli_epi16(vacc${ABC[N]}, 7);

      // Rounding high multiply: vacc := (vacc * multiplier + 0x4000) >> 15.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_mulhrs_epi16(vacc${ABC[N]}, vmultiplier);

      // Add the output zero point with signed saturation.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_adds_epi16(vacc${ABC[N]}, voutput_zero_point);

      // Pack the 16-bit results back to 8 bits with saturation.
      $for N in range(SIMD_TILE):
        const __m128i vy${ABC[N]} = ${_MM_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

      _mm_storeu_si128((__m128i*) y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    $if DATATYPE == "QU8":
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    const __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    // Full 16-byte load; the out-of-bounds bytes are never stored (the kernel is marked XNN_OOB_READS).
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    $if DATATYPE == "QU8":
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    // Store the 1-15 remaining elements in progressively smaller chunks.
    __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(${XINT8_T}))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) vy_lo;
    }
  }
}
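
// A minimal scalar sketch (illustration only, not part of the generated
// kernel) of the per-element conversion that the vector code above performs,
// assuming params->ssse3 holds per-lane broadcasts of the zero points and of
// the Q0.15 multiplier consumed by _mm_mulhrs_epi16. The helper name and the
// use of plain int32_t arithmetic are hypothetical.
//
//   static int32_t cvt_scalar_sketch(int32_t x, int32_t input_zero_point,
//                                    int32_t multiplier, int32_t output_zero_point) {
//     int32_t vacc = input_zero_point - x;         // _mm_sub_epi16
//     vacc <<= 7;                                  // _mm_slli_epi16
//     vacc = (vacc * multiplier + 0x4000) >> 15;   // _mm_mulhrs_epi16 (rounding)
//     vacc += output_zero_point;                   // _mm_adds_epi16 (saturating in SIMD)
//     return vacc;                                 // the final pack saturates to 8 bits
//   }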