// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


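// Round each element of x to the nearest integer, with ties rounded to even, and
// write the results to y. Rounding uses the "magic number" trick (add and subtract
// 2**23), which relies on the default round-to-nearest-even FP mode; the params
// argument is not referenced by this kernel.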
void xnn_f32_vrndne_ukernel__neon_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

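  // 0x4B000000 reinterprets as 2**23 (8388608.0f); adding it to a non-negative float
  // below 2**23 shifts all fractional bits out of the mantissa, rounding to an integer.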
  const float32x4_t vmagic_number = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

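    // Work on |x| so a single magic constant covers both signs. vcaltq_f32(vmagic_number, vx)
    // flags lanes with |x| > 2**23; those values are already integral and must pass through as-is.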
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vabsx${ABC[N:N+4]} = vabsq_f32(vx${ABC[N:N+4]});
      uint32x4_t vrndmask${ABC[N:N+4]} = vcaltq_f32(vmagic_number, vx${ABC[N:N+4]});

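    // |x| + 2**23 discards the fractional bits of |x| via round-to-nearest-even.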
    $for N in range(0, BATCH_TILE, 4):
      float32x4_t vrndabsx${ABC[N:N+4]} = vaddq_f32(vabsx${ABC[N:N+4]}, vmagic_number);

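    // Set bit 31 of the mask so the bit-select below also copies the sign bit from the
    // original x, restoring the sign of the rounded result (including -0.0f).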
    $for N in range(0, BATCH_TILE, 4):
      vrndmask${ABC[N:N+4]} = vorrq_u32(vrndmask${ABC[N:N+4]}, vmovq_n_u32(UINT32_C(0x80000000)));

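    // Subtract 2**23 back out to recover round-to-nearest-even(|x|).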
    $for N in range(0, BATCH_TILE, 4):
      vrndabsx${ABC[N:N+4]} = vsubq_f32(vrndabsx${ABC[N:N+4]}, vmagic_number);

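    // Per-bit select: take bits from x where the mask is set (already-integral inputs and
    // the sign bit), and from the rounded |x| elsewhere.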
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vy${ABC[N:N+4]} = vbslq_f32(vrndmask${ABC[N:N+4]}, vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]});

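    // Store the ${BATCH_TILE} rounded outputs.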
    $for N in range(0, BATCH_TILE, 4):
      vst1q_f32(y, vy${ABC[N:N+4]}); y += 4;
  }
  $if BATCH_TILE > 4:
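    // Process any remaining full 4-element vectors with the same magic-number rounding.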
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const float32x4_t vx = vld1q_f32(x); x += 4;
      const float32x4_t vabsx = vabsq_f32(vx);
      uint32x4_t vrndmask = vcaltq_f32(vmagic_number, vx);
      float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);
      vrndmask = vorrq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
      vrndabsx = vsubq_f32(vrndabsx, vmagic_number);
      const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);
      vst1q_f32(y, vy); y += 4;
    }
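  // Handle the last 1-3 elements: load a full vector (this may read past the end of x,
  // which the XNN_OOB_READS annotation accounts for) and store only the remaining lanes.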
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);
    const float32x4_t vabsx = vabsq_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vmagic_number, vx);
    float32x4_t vrndabsx = vaddq_f32(vabsx, vmagic_number);
    vrndmask = vorrq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndabsx = vsubq_f32(vrndabsx, vmagic_number);
    const float32x4_t vy = vbslq_f32(vrndmask, vx, vrndabsx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}