// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

void xnn_f16_vlrelu_ukernel__neonfp16arith_x${BATCH_TILE}(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(__fp16) == 0);

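  // Broadcast the LeakyReLU slope (stored as raw FP16 bits) to all 8 lanes.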
  const float16x8_t vslope = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.slope));
  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
  $if BATCH_TILE > 8:
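    // Unrolled main loop: ${BATCH_TILE} half-precision elements per iteration.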
    for (; batch >= ${BATCH_TILE} * sizeof(__fp16); batch -= ${BATCH_TILE} * sizeof(__fp16)) {
      // Load ${BATCH_TILE} inputs.
      $for N in range(0, BATCH_TILE, 8):
        const float16x8_t vx${ABC[N:N+8]} = vld1q_f16(i); i += 8;

      // Multiply by the slope and build a per-lane mask of inputs with the sign bit set.
      $for N in range(0, BATCH_TILE, 8):
        float16x8_t vacc${ABC[N:N+8]} = vmulq_f16(vx${ABC[N:N+8]}, vslope);
        const uint16x8_t vmask${ABC[N:N+8]} = vcltq_s16(vreinterpretq_s16_f16(vx${ABC[N:N+8]}), vmovq_n_s16(0));

      // Select x * slope for negative lanes, x otherwise.
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = vbslq_f16(vmask${ABC[N:N+8]}, vacc${ABC[N:N+8]}, vx${ABC[N:N+8]});

      // Store ${BATCH_TILE} outputs.
      $for N in range(0, BATCH_TILE, 8):
        vst1q_f16(o, vacc${ABC[N:N+8]}); o += 8;
    }
  // Process full vectors of 8 elements.
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;
    float16x8_t vacc = vmulq_f16(vx, vslope);
    const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vacc = vbslq_f16(vmask, vacc, vx);
    vst1q_f16(o, vacc); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Tail of 1-7 elements: load a full vector (XNN_OOB_READS permits over-reading),
    // then store only the valid lanes.
    const float16x8_t vx = vld1q_f16(i);
    float16x8_t vacc = vmulq_f16(vx, vslope);
    const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vacc = vbslq_f16(vmask, vacc, vx);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vacc_lo); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vacc_lo, 0);
    }
  }
}