// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$assert DIV_ALGO in ["div", "nr2"]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_${DIV_ALGO}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
  const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
  const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
  const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
  const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
  const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
  $if DIV_ALGO == "nr2":
    const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);

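  // sigmoid(x) is evaluated as f(z) = exp(z) / (1 + exp(z)) with z = -|x|, and the result is
  // flipped to 1 - f(z) for non-negative x. exp(z) is computed with the usual range reduction
  // z = n * ln2 + t, a degree-5 polynomial approximation of exp(t), and scaling by 2**n.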
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m256 vx${ABC[0]} = _mm256_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        const __m256 vx${ABC[N]} = _mm256_loadu_ps(x + ${N * 8});
      x += ${BATCH_TILE};

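      // z := -|x|, formed by setting the sign bit, so that exp(z) never overflows.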
      $for N in range(SIMD_TILE):
        const __m256 vz${ABC[N]} = _mm256_or_ps(vx${ABC[N]}, vsign_mask);

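      // n := round(z / ln2), computed as z * log2(e) plus a large magic bias that leaves the
      // rounded integer in the low mantissa bits.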
      $for N in range(SIMD_TILE):
        __m256 vn${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vz${ABC[N]}, vlog2e), vmagic_bias);

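      // s := 2**n, obtained by shifting the low mantissa bits of the biased n into the exponent
      // field, one 128-bit half at a time (AVX1 has no 256-bit integer shift).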
      $for N in range(SIMD_TILE):
        const __m128 vs_lo${ABC[N]} = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn${ABC[N]})), 23));
        const __m128 vs_hi${ABC[N]} = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn${ABC[N]}, 1)), 23));
        const __m256 vs${ABC[N]} = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo${ABC[N]}), vs_hi${ABC[N]}, 1);

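      // Subtract the magic bias to recover n as a float.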
      $for N in range(SIMD_TILE):
        vn${ABC[N]} = _mm256_sub_ps(vn${ABC[N]}, vmagic_bias);

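      // t := z - n * ln2, with ln2 split into high and low parts to reduce rounding error.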
      $for N in range(SIMD_TILE):
        __m256 vt${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vn${ABC[N]}, vminus_ln2_hi), vz${ABC[N]});

      $for N in range(SIMD_TILE):
        vt${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vn${ABC[N]}, vminus_ln2_lo), vt${ABC[N]});

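      // Approximate exp(t) ~= 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) by
      // evaluating the degree-5 polynomial with Horner's scheme.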
      $for N in range(SIMD_TILE):
        __m256 vp${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vc5, vt${ABC[N]}), vc4);

      $for N in range(SIMD_TILE):
        vp${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vp${ABC[N]}, vt${ABC[N]}), vc3);

      $for N in range(SIMD_TILE):
        vp${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vp${ABC[N]}, vt${ABC[N]}), vc2);

      $for N in range(SIMD_TILE):
        vp${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vp${ABC[N]}, vt${ABC[N]}), vc1);

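      // Reconstruct e := exp(z) ~= s * (1 + t * p(t)), computed as s + (t * s) * p.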
      $for N in range(SIMD_TILE):
        vt${ABC[N]} = _mm256_mul_ps(vt${ABC[N]}, vs${ABC[N]});

      $for N in range(SIMD_TILE):
        const __m256 ve${ABC[N]} = _mm256_add_ps(_mm256_mul_ps(vt${ABC[N]}, vp${ABC[N]}), vs${ABC[N]});

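      // d := exp(z) + 1, the sigmoid denominator.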
      $for N in range(SIMD_TILE):
        const __m256 vd${ABC[N]} = _mm256_add_ps(ve${ABC[N]}, vone);

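      // f := exp(z) / (exp(z) + 1), either with a full-precision division ("div" variant) or with
      // a reciprocal estimate refined by two Newton-Raphson steps r := r * (2 - d * r) ("nr2" variant).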
      $if DIV_ALGO == "div":
        $for N in range(SIMD_TILE):
          __m256 vf${ABC[N]} = _mm256_div_ps(ve${ABC[N]}, vd${ABC[N]});
      $else:
        $for N in range(SIMD_TILE):
          __m256 vr${ABC[N]} = _mm256_rcp_ps(vd${ABC[N]});

        $for N in range(SIMD_TILE):
          vr${ABC[N]} = _mm256_mul_ps(vr${ABC[N]}, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr${ABC[N]}, vd${ABC[N]})));
          vr${ABC[N]} = _mm256_mul_ps(vr${ABC[N]}, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr${ABC[N]}, vd${ABC[N]})));

        $for N in range(SIMD_TILE):
          __m256 vf${ABC[N]} = _mm256_mul_ps(ve${ABC[N]}, vr${ABC[N]});

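      // For z below the cutoff the exponential underflows, so force those lanes of f to 0.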
      $for N in range(SIMD_TILE):
        vf${ABC[N]} = _mm256_andnot_ps(_mm256_cmp_ps(vz${ABC[N]}, vdenorm_cutoff, _CMP_LT_OS), vf${ABC[N]});

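      // f(z) above is sigmoid(x) only for negative x; for non-negative x select 1 - f(z) instead,
      // using the sign bit of x as the blend mask.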
      $for N in range(SIMD_TILE):
        vf${ABC[N]} = _mm256_blendv_ps(_mm256_sub_ps(vone, vf${ABC[N]}), vf${ABC[N]}, vx${ABC[N]});

      _mm256_storeu_ps(y, vf${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(y + ${N * 8}, vf${ABC[N]});
      y += ${BATCH_TILE};
    }
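  // Same computation, one 8-element vector at a time.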
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);

    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    $if DIV_ALGO == "div":
      __m256 vf = _mm256_div_ps(ve, vd);
    $else:
      __m256 vr = _mm256_rcp_ps(vd);
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

    _mm256_storeu_ps(y, vf);
    y += 8;
  }
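  // Handle the final 1-7 elements, if any.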
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
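    // Load a mask whose first n / sizeof(float) lanes are set, taken from a sliding window over
    // mask_table, and use it to read only the remaining elements.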
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
    const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
    const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
    const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
    vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);

    __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
    vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    $if DIV_ALGO == "div":
      __m256 vf = _mm256_div_ps(ve, vd);
    $else:
      __m256 vr = _mm256_rcp_ps(vd);
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
      __m256 vf = _mm256_mul_ps(ve, vr);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

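    // Store the remaining 1-7 elements as 4-, 2-, and/or 1-element pieces, narrowing the result
    // to 128 bits first.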
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vf_lo);
    }
  }
}