// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE == 16 or BATCH_TILE % 32 == 0
$SIMD_TILE = BATCH_TILE // 32
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM256_CVTEPX8_EPI16 = {"QS8": "_mm256_cvtepi8_epi16", "QU8": "_mm256_cvtepu8_epi16"}[DATATYPE]
$_MM256_PACKXS_EPI16 = {"QS8": "_mm256_packs_epi16", "QU8": "_mm256_packus_epi16"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vlrelu_ukernel__avx2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
  const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
  const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      // Load ${BATCH_TILE} 8-bit elements and widen them to 16 bits.
      __m256i vacc${ABC[0]} = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
      $for N in range(1, 2*SIMD_TILE):
        __m256i vacc${ABC[N]} = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) (x + ${N * 16})));
      x += ${BATCH_TILE};

      // The mask is all-ones where x > input_zero_point; the accumulator becomes (input_zero_point - x).
      $for N in range(2*SIMD_TILE):
        __m256i vmultiplier${ABC[N]} = _mm256_cmpgt_epi16(vacc${ABC[N]}, vinput_zero_point);
        vacc${ABC[N]} = _mm256_sub_epi16(vinput_zero_point, vacc${ABC[N]});

      // Select the positive or negative slope multiplier per element and pre-shift the
      // difference by 7, so the rounding Q15 multiplication below divides the product by 256.
      $for N in range(2*SIMD_TILE):
        vmultiplier${ABC[N]} = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier${ABC[N]});
        vacc${ABC[N]} = _mm256_slli_epi16(vacc${ABC[N]}, 7);

      // Rounding fixed-point multiplication: (a * b + 0x4000) >> 15.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_mulhrs_epi16(vacc${ABC[N]}, vmultiplier${ABC[N]});

      // Re-add the output zero point with signed saturation.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_adds_epi16(vacc${ABC[N]}, voutput_zero_point);

      // Pack pairs of registers back to 8 bits; the pack interleaves 128-bit lanes,
      // so a permute is needed afterwards to restore element order.
      $for N in range(SIMD_TILE):
        __m256i vy${ABC[N]} = ${_MM256_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

      $for N in range(SIMD_TILE):
        vy${ABC[N]} = _mm256_permute4x64_epi64(vy${ABC[N]}, _MM_SHUFFLE(3, 1, 2, 0));

      _mm256_storeu_si256((__m256i*) y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_si256((__m256i*) (y + ${N * 32}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    __m256i vacc = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
    __m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
    x += 16;

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    const __m128i vy = ${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vacc), vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    // The full 16-byte load may read past the end of x; this is permitted by XNN_OOB_READS.
    __m256i vacc = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
    __m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);

    // Store the remaining 1..15 elements in 8/4/2/1-byte chunks.
    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    __m128i vy = ${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vacc), vacc_hi);
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) _mm_extract_epi8(vy, 0);
    }
  }
}
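
// A minimal scalar model of the fixed-point arithmetic above, shown for the
// QS8 case (QU8 differs only in saturating the final result to [0, 255]).
// This helper is a sketch for reference, not part of XNNPACK's API, and its
// name is hypothetical. It assumes the multipliers in params are Q8.8 slope
// values pre-negated to cancel the (input_zero_point - x) sign flip used by
// the SIMD path, e.g. a multiplier of -256 models a slope of 1.0.
static inline int8_t scalar_vlrelu_model(
  int8_t x,
  int16_t input_zero_point,
  int16_t positive_multiplier,
  int16_t negative_multiplier,
  int16_t output_zero_point)
{
  // Select the slope, matching _mm256_cmpgt_epi16 + _mm256_blendv_epi8.
  const int16_t multiplier =
    ((int16_t) x > input_zero_point) ? positive_multiplier : negative_multiplier;
  // (input_zero_point - x) * 128 mirrors _mm256_sub_epi16 + _mm256_slli_epi16(vacc, 7).
  int32_t acc = ((int32_t) input_zero_point - (int32_t) x) * 128;
  // Rounding Q15 multiply mirrors _mm256_mulhrs_epi16 (arithmetic shift assumed, as on x86).
  acc = (acc * (int32_t) multiplier + (1 << 14)) >> 15;
  // _mm256_adds_epi16 saturates to int16; the narrower int8 clamp below subsumes it.
  acc += (int32_t) output_zero_point;
  // The final pack (_mm_packs_epi16) saturates to the int8 range.
  if (acc > INT8_MAX) acc = INT8_MAX;
  if (acc < INT8_MIN) acc = INT8_MIN;
  return (int8_t) acc;
}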