// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
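// Note: the $-prefixed lines and ${...} expressions in this file are template
// directives; XNNPACK's code generator (tools/xngen) expands them into one
// concrete C source per BATCH_TILE / FMA combination.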
#include <assert.h>
#include <stdint.h>  // uintptr_t, used for the tail-mask address arithmetic below

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


$ISA = {0: "avx", 3: "fma3"}[FMA]
void xnn_f32_vhswish_ukernel__${ISA}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vhalf = _mm256_load_ps(params->avx.half);
  const __m256 vone = _mm256_load_ps(params->avx.one);
  const __m256 vzero = _mm256_setzero_ps();

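  // Compute h-swish(x) = x * min(max(x/6 + 1/2, 0), 1), i.e. x * relu6(x + 3) / 6,
  // using one __m256 register (8 floats) per accumulator.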
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
      $for N in range(8, BATCH_TILE, 8):
        const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

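      // With FMA3, x * 1/6 + 1/2 fuses into a single instruction; the plain
      // AVX path below uses a separate multiply and add.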
      $if FMA == 3:
        $for N in range(0, BATCH_TILE, 8):
          __m256 vacc${ABC[N:N+8]} = _mm256_fmadd_ps(vx${ABC[N:N+8]}, vsixth, vhalf);
      $else:
        $for N in range(0, BATCH_TILE, 8):
          __m256 vacc${ABC[N:N+8]} = _mm256_mul_ps(vx${ABC[N:N+8]}, vsixth);

        $for N in range(0, BATCH_TILE, 8):
          vacc${ABC[N:N+8]} = _mm256_add_ps(vacc${ABC[N:N+8]}, vhalf);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_max_ps(vacc${ABC[N:N+8]}, vzero);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_min_ps(vacc${ABC[N:N+8]}, vone);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_mul_ps(vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});

      _mm256_storeu_ps(y, vacc${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm256_storeu_ps(y + ${N}, vacc${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
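  // Process any remaining full vectors of 8 elements (this is the main loop
  // when BATCH_TILE == 8).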
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;
    $if FMA == 3:
      __m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
    $else:
      __m256 vacc = _mm256_mul_ps(vx, vsixth);
      vacc = _mm256_add_ps(vacc, vhalf);
    vacc = _mm256_max_ps(vacc, vzero);
    vacc = _mm256_min_ps(vacc, vone);
    vacc = _mm256_mul_ps(vacc, vx);
    _mm256_storeu_ps(y, vacc);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
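    // Build a lane mask with exactly n / sizeof(float) active lanes. This
    // assumes mask_table holds seven all-ones (-1) int32 entries followed by
    // seven zeros, so loading 8 entries starting n bytes before element 7
    // sets the first n/4 lanes and clears the rest.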
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    $if FMA == 3:
      __m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
    $else:
      __m256 vacc = _mm256_mul_ps(vx, vsixth);
      vacc = _mm256_add_ps(vacc, vhalf);
    vacc = _mm256_max_ps(vacc, vzero);
    vacc = _mm256_min_ps(vacc, vone);
    vacc = _mm256_mul_ps(vacc, vx);

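    // Store the remaining 1-7 floats: the bits of the byte count n select the
    // store widths (n & 16 -> 4 floats, n & 8 -> 2 floats, n & 4 -> 1 float).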
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vacc_lo);
    }
  }
}
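// A minimal usage sketch (illustrative, not part of the template): invoking
// the avx/x8 variant generated from this file. Writing the params fields
// directly is an assumption based on the _mm256_load_ps loads above (eight
// replicated floats per field); real callers would use the matching
// xnn_init_f32_hswish_*_params helper from XNNPACK instead.
//
//   union xnn_f32_hswish_params params;
//   for (size_t i = 0; i < 8; i++) {
//     params.avx.sixth[i] = 0x1.555556p-3f;  // 1/6
//     params.avx.half[i] = 0.5f;
//     params.avx.one[i] = 1.0f;
//   }
//   float x[8] = {-4.0f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.0f, 6.0f};
//   float y[8];
//   // n is in bytes; 8 full floats here, so the masked tail path is not taken.
//   xnn_f32_vhswish_ukernel__avx_x8(8 * sizeof(float), x, y, &params);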