// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE == 16 or BATCH_TILE % 32 == 0
$SIMD_TILE = BATCH_TILE // 32
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM256_CVTEPX8_EPI16 = {"QS8": "_mm256_cvtepi8_epi16", "QU8": "_mm256_cvtepu8_epi16"}[DATATYPE]
$_MM256_PACKXS_EPI16 = {"QS8": "_mm256_packs_epi16", "QU8": "_mm256_packus_epi16"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
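// ${DATATYPE} vcvt microkernel: requantizes each element from the input
// quantization parameters to the output ones. Every byte is widened to
// 16 bits, subtracted from the input zero point, scaled by a rounding
// fixed-point multiply, offset by the output zero point with saturation,
// and packed back to 8 bits. The reversed subtraction (zero point minus
// input) is presumably compensated by the sign of the multiplier prepared
// during params initialization.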
void xnn_${DATATYPE.lower()}_vcvt_ukernel__avx2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

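  // Each field is loaded as a full 32-byte vector, so the params
  // initialization presumably pre-broadcasts every value across all
  // sixteen 16-bit lanes.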
  const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
  const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
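      // Load ${BATCH_TILE} input bytes and widen each to 16 bits.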
      __m256i vacc${ABC[0]} = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
      $for N in range(1, 2*SIMD_TILE):
        __m256i vacc${ABC[N]} = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) (x + ${N * 16})));
      x += ${BATCH_TILE};

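      // Compute the difference from the input zero point (note the
      // reversed operand order: input_zero_point - x).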
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_sub_epi16(vinput_zero_point, vacc${ABC[N]});

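      // Pre-shift by 7 so the rounding high multiply below effectively
      // scales by vmultiplier / 256 rather than vmultiplier / 32768.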
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_slli_epi16(vacc${ABC[N]}, 7);

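      // Rounding fixed-point multiply: (vacc * vmultiplier + 0x4000) >> 15.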
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_mulhrs_epi16(vacc${ABC[N]}, vmultiplier);

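      // Add the output zero point with signed saturation.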
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm256_adds_epi16(vacc${ABC[N]}, voutput_zero_point);

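      // Pack pairs of 16-bit vectors back down to 8 bits with saturation.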
      $for N in range(SIMD_TILE):
        __m256i vy${ABC[N]} = ${_MM256_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

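      // The AVX2 pack interleaves its inputs per 128-bit lane; permute the
      // 64-bit groups to restore memory order.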
      $for N in range(SIMD_TILE):
        vy${ABC[N]} = _mm256_permute4x64_epi64(vy${ABC[N]}, _MM_SHUFFLE(3, 1, 2, 0));

      _mm256_storeu_si256((__m256i*) y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_si256((__m256i*) (y + ${N * 32}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
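  // Convert 16 elements per iteration.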
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    __m256i vacc = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
    x += 16;

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    const __m128i vy = ${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vacc), vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
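  // Handle the final 1-15 elements: convert a full 16-byte block (the
  // kernel is marked XNN_OOB_READS, allowing the load to read past the
  // end of x) and store only the first n bytes.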
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    __m256i vacc = ${_MM256_CVTEPX8_EPI16}(_mm_loadu_si128((const __m128i*) x));
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    __m128i vy = ${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vacc), vacc_hi);
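    // Store the low n bytes of vy, 8/4/2/1 bytes at a time based on the
    // set bits of n, shifting consumed bytes out after each store.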
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) _mm_extract_epi8(vy, 0);
    }
  }
}