// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
void xnn_f32_${DATATYPE.lower()}_vcvt_ukernel__avx_x${BATCH_TILE}(
    size_t n,
    const float* x,
    ${XINT8_T}* y,
    const union xnn_f32_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

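  // Conversion parameters: the quantization scale, the upper clamp bound
  // (output_max - zero_point, applied before the zero point is added), the
  // output zero point, and the lower clamp bound on the quantized output.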
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx.output_min);

  $if BATCH_TILE > 8:
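    // Main loop: convert ${BATCH_TILE} floats per iteration.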
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
      $for N in range(8, BATCH_TILE, 8):
        __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

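      // Multiply by the quantization scale.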
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = _mm256_mul_ps(vx${ABC[N:N+8]}, vscale);

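      // Clamp from above at output_max - zero_point.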
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = _mm256_min_ps(vx${ABC[N:N+8]}, voutput_max_less_zero_point);

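      // Convert to int32 with rounding (round-to-nearest-even under the default MXCSR mode).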
      $for N in range(0, BATCH_TILE, 8):
        const __m256i vacc${ABC[N:N+8]} = _mm256_cvtps_epi32(vx${ABC[N:N+8]});

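      // Repack each 256-bit vector of int32 into a 128-bit vector of int16 with signed saturation.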
      $for N in range(0, BATCH_TILE, 8):
        __m128i vy${ABC[N:N+8]} = _mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extractf128_si256(vacc${ABC[N:N+8]}, 1));

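      // Add the output zero point with saturating int16 addition.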
      $for N in range(0, BATCH_TILE, 8):
        vy${ABC[N:N+8]} = _mm_adds_epi16(vy${ABC[N:N+8]}, voutput_zero_point);

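      // Pack pairs of int16 vectors down to ${XINT8_T} with saturation.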
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          __m128i vy${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vy${ABC[N:N+8]}, vy${ABC[N+8:N+16]});
        $else:
          vy${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});

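      // Clamp from below at output_min.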
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vy${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vy${ABC[N:N+16]}, voutput_min);
        $else:
          vy${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vy${ABC[N:N+8]}, voutput_min);

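      // Store the ${BATCH_TILE} converted values.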
      _mm_storeu_si128((__m128i*) y, vy${ABC[0:16]});
      $for N in range(16, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          _mm_storeu_si128((__m128i*) (y + ${N}), vy${ABC[N:N+16]});
        $else:
          _mm_storel_epi64((__m128i*) (y + ${N}), vy${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
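  // Convert 8 elements at a time through the same scale-clamp-round-pack pipeline.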
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
    x += 8;

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = ${_MM_PACKXS_EPI16}(vy, vy);
    vy = ${_MM_MAX_EPX8}(vy, voutput_min);

    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
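  // Handle the final 1-7 elements with a masked load.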
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
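    // Build a mask covering the n/4 remaining elements; the mask table is expected
    // to hold seven all-ones entries followed by seven zeros, so stepping back n
    // bytes from its 8th entry sets exactly the lanes that are still valid.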
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extractf128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    vy = ${_MM_PACKXS_EPI16}(vy, vy);
    vy = ${_MM_MAX_EPX8}(vy, voutput_min);

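    // Write out the remaining 1-7 bytes in 4-, 2-, and 1-byte pieces, shifting the
    // stored lanes out of the register after each partial store.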
    if (n & (4 * sizeof(float))) {
      _mm_storeu_si32(y, vy);
      y += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    if (n & (2 * sizeof(float))) {
      _mm_storeu_si16(y, vy);
      y += 2;
      vy = _mm_srli_epi32(vy, 16);
    }
    if (n & (1 * sizeof(float))) {
      *y = (${XINT8_T}) _mm_extract_epi8(vy, 0);
    }
  }
}