// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

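// Half-precision hswish microkernel using F16C conversions: inputs are loaded
// as FP16, converted to FP32 for arithmetic, and converted back to FP16 for
// storage. The batch size n is given in bytes and must be a non-zero multiple
// of sizeof(uint16_t).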
void xnn_f16_vhswish_ukernel__f16c_x${BATCH_TILE}(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);

  const uint16_t* x = (const uint16_t*) x_ptr;
  uint16_t* y = (uint16_t*) y_ptr;

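  // Constants prepared by the kernel's parameter initialization (assumed layout:
  // 1/6 and 3.0 broadcast as single-precision vectors, 6.0 as eight FP16 bit
  // patterns), plus an all-zero vector for the lower clamp bound.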
  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vthree = _mm256_load_ps(params->avx.three);
  const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
  const __m128i vzero = _mm_setzero_si128();

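  // hswish(x) = x * min(max(x + 3, 0), 6) / 6, computed in single precision with
  // each intermediate result rounded back to half precision. The clamp to [0, 6]
  // is applied with integer min/max directly on the FP16 bit pattern of (x + 3):
  // after the max with zero the value is non-negative, and non-negative FP16
  // values order the same way as their bit patterns.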
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      __m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
      $for N in range(8, BATCH_TILE, 8):
        __m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (x + ${N})));
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 8):
        __m128i vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_add_ps(vx${ABC[N:N+8]}, vthree), _MM_FROUND_NO_EXC);
        vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx${ABC[N:N+8]}, vsixth), _MM_FROUND_NO_EXC));

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_max_epi16(vacc${ABC[N:N+8]}, vzero);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_min_epi16(vacc${ABC[N:N+8]}, vsix);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[N:N+8]}), vx${ABC[N:N+8]}), _MM_FROUND_NO_EXC);

      _mm_storeu_si128((__m128i*) y, vacc${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (y + ${N}), vacc${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
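  // Process full vectors of 8 elements at a time.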
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    x += 8;
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);
    _mm_storeu_si128((__m128i*) y, vacc);
    y += 8;
  }
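  // Handle a final partial vector of 1 to 7 elements. The 8-element load may
  // read past the end of the input buffer (the kernel is declared XNN_OOB_READS);
  // only the remaining elements are stored.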
  if XNN_UNLIKELY(n != 0) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);

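    // Store 4, 2, and then 1 element depending on the remaining count, shifting
    // the already-stored lanes out of the vector after each partial store.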
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) y, vacc);
      vacc = _mm_unpackhi_epi64(vacc, vacc);
      y += 4;
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(y, vacc);
      vacc = _mm_srli_epi64(vacc, 32);
      y += 2;
    }
    if (n & (1 * sizeof(uint16_t))) {
      *y = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}