// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

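// This template generates F16->F32 conversion micro-kernels that use only
// integer NEON and single-precision instructions, so they do not require the
// half-precision extension. BATCH_TILE (a multiple of 8) is the unroll factor.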
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__neon_int16_x${BATCH_TILE}(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

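  // Conversion constants:
  //  - vsign_mask isolates the half-precision sign bit;
  //  - vexp_offset and vexp_scale (0x1.0p-112f in the corresponding params
  //    initialization) re-bias the exponent of normalized inputs from the
  //    half-precision bias (15) to the single-precision bias (127);
  //  - vmagic_bias (the bit pattern of 0.5f) converts denormalized inputs;
  //  - vdenorm_cutoff (the smallest normalized half-precision number)
  //    separates the two cases.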
  const uint16x8_t vsign_mask = vmovq_n_u16(0x8000);
  const uint16x8_t vexp_offset = vmovq_n_u16(0x7000);
  const float32x4_t vexp_scale = vld1q_dup_f32(&params->neon.exp_scale);
  const uint32x4_t vmagic_bias = vmovq_n_u32(0x3F000000);
  const uint16x8_t vdenorm_cutoff = vmovq_n_u16(0x0400);

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
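    // Main loop: convert ${BATCH_TILE} half-precision elements per iteration.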
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      $for N in range(SIMD_TILE):
        const uint16x8_t vh${N} = vld1q_u16(i); i += 8;

      $for N in range(SIMD_TILE):
        const uint16x8_t vsign${N} = vandq_u16(vh${N}, vsign_mask);

      $for N in range(SIMD_TILE):
        const uint16x8_t vnonsign${N} = veorq_u16(vh${N}, vsign${N});

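      // Renormalization: zipping (vnonsign << 13) with (vexp_offset + (vnonsign >> 3))
      // assembles single-precision bit patterns whose exponent is biased 112 too
      // high; the multiplication by vexp_scale corrects the bias.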
      $for N in range(SIMD_TILE):
        const uint16x8x2_t vprenorm${N} = vzipq_u16(vshlq_n_u16(vnonsign${N}, 13), vsraq_n_u16(vexp_offset, vnonsign${N}, 3));

      $for N in range(SIMD_TILE):
        const float32x4_t vnorm${2*N} = vmulq_f32(vreinterpretq_f32_u16(vprenorm${N}.val[0]), vexp_scale);
        const float32x4_t vnorm${2*N+1} = vmulq_f32(vreinterpretq_f32_u16(vprenorm${N}.val[1]), vexp_scale);

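      // Denormalized inputs: widening-add the 16-bit magnitude to the bit
      // pattern of 0.5f, then subtract 0.5f to get the correctly scaled result.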
      $for N in range(SIMD_TILE):
        const float32x4_t vdenorm${2*N} = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign${N}))), vreinterpretq_f32_u32(vmagic_bias));
        const float32x4_t vdenorm${2*N+1} = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign${N}))), vreinterpretq_f32_u32(vmagic_bias));

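      // vmask is all-ones for lanes whose magnitude exceeds vdenorm_cutoff;
      // those lanes take the normalized path.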
      $for N in range(SIMD_TILE):
        const uint16x8_t vmask${N} = vcgtq_u16(vnonsign${N}, vdenorm_cutoff);

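      // Select between the normalized and denormalized results, widen the sign
      // into the single-precision sign position, and OR it in.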
      $for N in range(SIMD_TILE):
        const uint32x4_t vxmask${2*N} = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask${N}))));
        const uint32x4_t vf${2*N} = vorrq_u32(vshll_n_u16(vget_low_u16(vsign${N}), 16),
          vreinterpretq_u32_f32(vbslq_f32(vxmask${2*N}, vnorm${2*N}, vdenorm${2*N})));

      $for N in range(SIMD_TILE):
        const uint32x4_t vxmask${2*N+1} = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask${N}))));
        const uint32x4_t vf${2*N+1} = vorrq_u32(vshll_n_u16(vget_high_u16(vsign${N}), 16),
          vreinterpretq_u32_f32(vbslq_f32(vxmask${2*N+1}, vnorm${2*N+1}, vdenorm${2*N+1})));

      $for N in range(2*SIMD_TILE):
        vst1q_f32(output, vreinterpretq_f32_u32(vf${N})); output += 4;
    }
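  // Convert 8 elements at a time (the main loop when BATCH_TILE == 8).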
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint16x8_t vsign = vandq_u16(vh, vsign_mask);

    const uint16x8_t vnonsign = veorq_u16(vh, vsign);

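    // Renormalize: build single-precision bit patterns with the exponent biased
    // 112 too high, then fix up the bias by multiplying by vexp_scale.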
    const uint16x8x2_t vprenorm = vzipq_u16(vshlq_n_u16(vnonsign, 13), vsraq_n_u16(vexp_offset, vnonsign, 3));
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[0]), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[1]), vexp_scale);

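    // Convert denormalized inputs via the 0.5f magic-bias trick.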
    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));

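    // Choose the normalized result where the magnitude exceeds the smallest
    // normalized half-precision number, the denormalized result elsewhere,
    // and OR in the widened sign.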
    const uint16x8_t vmask = vcgtq_u16(vnonsign, vdenorm_cutoff);

    const uint32x4_t vxmask_lo = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask))));
    const uint32x4_t vf_lo = vorrq_u32(vshll_n_u16(vget_low_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    const uint32x4_t vxmask_hi = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask))));
    const uint32x4_t vf_hi = vorrq_u32(vshll_n_u16(vget_high_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
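  // Tail: 1-7 remaining elements. A full vector is loaded (the kernel is
  // declared XNN_OOB_READS), but only the remaining elements are stored.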
  if XNN_UNPREDICTABLE(n != 0) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint16x8_t vsign = vandq_u16(vh, vsign_mask);

    const uint16x8_t vnonsign = veorq_u16(vh, vsign);

    const uint16x8x2_t vprenorm = vzipq_u16(vshlq_n_u16(vnonsign, 13), vsraq_n_u16(vexp_offset, vnonsign, 3));
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[0]), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[1]), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));

    const uint16x8_t vmask = vcgtq_u16(vnonsign, vdenorm_cutoff);

    const uint32x4_t vxmask_lo = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask))));
    uint32x4_t vf = vorrq_u32(vshll_n_u16(vget_low_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

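    // Store 4, 2, and/or 1 elements according to the bits of n.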
    if (n & (4 * sizeof(uint16_t))) {
      vst1q_f32(output, vreinterpretq_f32_u32(vf)); output += 4;

      const uint32x4_t vxmask_hi = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask))));
      vf = vorrq_u32(vshll_n_u16(vget_high_u16(vsign), 16),
        vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));
    }
    uint32x2_t vf_lo = vget_low_u32(vf);
    if (n & (2 * sizeof(uint16_t))) {
      vst1_f32(output, vreinterpret_f32_u32(vf_lo)); output += 2;
      vf_lo = vget_high_u32(vf);
    }
    if (n & (1 * sizeof(uint16_t))) {
      vst1_lane_f32(output, vreinterpret_f32_u32(vf_lo), 0);
    }
  }
}
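
// A minimal usage sketch for a generated instantiation (names assumed; the
// params initializer and the x8 suffix depend on the build configuration):
//
//   uint16_t src[8] = {0x3C00 /* 1.0h */, 0x4000 /* 2.0h */ /* , ... */};
//   float dst[8];
//   union xnn_f16_f32_cvt_params params;
//   xnn_init_f16_f32_cvt_neon_params(&params);  // sets params.neon.exp_scale
//   xnn_f16_f32_vcvt_ukernel__neon_int16_x8(
//       8 * sizeof(uint16_t), src, dst, &params);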