// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert SSE in [2, 4]
$assert not AVX or SSE == 4
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


$ISA = "avx" if AVX else {2: "sse2", 4: "sse41"}[SSE]
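// Converts IEEE FP16 numbers (passed as raw uint16_t bit patterns) to FP32 using
// integer SSE operations plus floating-point multiply and subtract. Normalized and
// denormalized inputs are converted on separate paths and blended per lane.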
void xnn_f16_f32_vcvt_ukernel__${ISA}_int32_x${BATCH_TILE}(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

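  // Conversion constants, preloaded from the params structure:
  //   sign_mask     - isolates the sign bit (bit 31 after the unpack below),
  //   exp_offset    - added to the shifted bits to rebias the exponent of normalized values,
  //   exp_scale     - multiplier that completes the normalized-value rescaling,
  //   magic_bias    - float bias used to reconstruct denormalized values,
  //   denorm_cutoff - threshold separating normalized from denormalized inputs.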
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int32.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int32.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int32.exp_scale);
  const __m128i vmagic_bias = _mm_load_si128((const __m128i*) params->sse_int32.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
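    // Unrolled main loop: same algorithm as the 8-element loop below, processing
    // ${BATCH_TILE} elements per iteration.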
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
      $for N in range(1, SIMD_TILE):
        const __m128i vh${N} = _mm_loadu_si128((const __m128i*) (i + ${N * 8}));
      i += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        const __m128i vw${2*N} = _mm_unpacklo_epi16(_mm_setzero_si128(), vh${N});
        const __m128i vw${2*N+1} = _mm_unpackhi_epi16(_mm_setzero_si128(), vh${N});

      $for N in range(2*SIMD_TILE):
        const __m128i vsign${N} = _mm_and_si128(vw${N}, vsign_mask);

      $for N in range(2*SIMD_TILE):
        const __m128i vnonsign${N} = _mm_xor_si128(vw${N}, vsign${N});

      $for N in range(2*SIMD_TILE):
        const __m128i vnorm${N} = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign${N}, 3), vexp_offset)), vexp_scale));

      $for N in range(2*SIMD_TILE):
        const __m128i vdenorm${N} = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign${N}, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

      $for N in range(2*SIMD_TILE):
        const __m128i vmask${N} = _mm_cmpgt_epi32(vnonsign${N}, vdenorm_cutoff);

      $for N in range(2*SIMD_TILE):
        $if SSE == 4:
          const __m128i vf${N} = _mm_or_si128(vsign${N}, _mm_blendv_epi8(vdenorm${N}, vnorm${N}, vmask${N}));
        $else:
          const __m128i vf${N} = _mm_or_si128(vsign${N},
            _mm_or_si128(_mm_and_si128(vmask${N}, vnorm${N}), _mm_andnot_si128(vmask${N}, vdenorm${N})));

      _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
      $for N in range(1, 2*SIMD_TILE):
        _mm_storeu_ps(output + ${N * 4}, _mm_castsi128_ps(vf${N}));
      output += ${BATCH_TILE};
    }
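  // Main loop: convert 8 elements (one 128-bit vector of FP16 values) per iteration.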
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

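    // Place each 16-bit element into the upper half of a 32-bit lane (h << 16), zeros below.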
    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

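    // Isolate the sign bit, now at bit 31 of each lane.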
    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

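    // Clear the sign bit, leaving only the FP16 exponent and mantissa.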
    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

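    // Normalized inputs: shift the FP16 exponent and mantissa into FP32 position,
    // rebias the exponent by adding exp_offset, then rescale with a multiply by exp_scale.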
    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

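    // Denormalized inputs: OR the raw FP16 bits into the mantissa of magic_bias and
    // subtract magic_bias, leaving the correctly scaled subnormal value.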
    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

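    // Per-lane select: take the normalized result where the input exceeds denorm_cutoff,
    // the denormalized result otherwise, then restore the sign bit.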
    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    $if SSE == 4:
      const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
    $else:
      const __m128i vf_lo = _mm_or_si128(vsign_lo,
        _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));

    const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
    $if SSE == 4:
      const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
    $else:
      const __m128i vf_hi = _mm_or_si128(vsign_hi,
        _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
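  // Tail: convert the final 1-7 elements. The full-width load may read past the end
  // of the input buffer; the kernel is annotated XNN_OOB_READS to allow this.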
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    $if SSE == 4:
      __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));
    $else:
      __m128i vf = _mm_or_si128(vsign_lo,
        _mm_or_si128(_mm_and_si128(vmask_lo, vnorm_lo), _mm_andnot_si128(vmask_lo, vdenorm_lo)));

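    // The bits of the remaining byte count n select stores of 4, 2, and 1 floats.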
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
      $if SSE == 4:
        vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
      $else:
        vf = _mm_or_si128(vsign_hi,
          _mm_or_si128(_mm_and_si128(vmask_hi, vnorm_hi), _mm_andnot_si128(vmask_hi, vdenorm_hi)));
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

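      // Move the two remaining converted values from the upper half of the register
      // into the lower half.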
      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (n & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}