// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_vrndu_ukernel__neon_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // 0x4B000000 is 2**23: every float with |x| >= 2**23 is already integral,
  // so only lanes below this threshold need to be rounded.
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const float32x4_t vone = vmovq_n_f32(1.0f);
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

    // Truncate toward zero by a round trip through int32.
    $for N in range(0, BATCH_TILE, 4):
      const int32x4_t vintx${ABC[N:N+4]} = vcvtq_s32_f32(vx${ABC[N:N+4]});

    // Mask of lanes with |x| < 2**23: only these need rounding; false for NaN lanes.
    $for N in range(0, BATCH_TILE, 4):
      uint32x4_t vrndmask${ABC[N:N+4]} = vcaltq_f32(vx${ABC[N:N+4]}, vintegral_threshold);

    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vprerndx${ABC[N:N+4]} = vcvtq_f32_s32(vintx${ABC[N:N+4]});

    // Clear the sign bit of the mask so the select below always keeps the sign of x
    // (this preserves -0.0f).
    $for N in range(0, BATCH_TILE, 4):
      vrndmask${ABC[N:N+4]} = vbicq_u32(vrndmask${ABC[N:N+4]}, vmovq_n_u32(UINT32_C(0x80000000)));

    // rndx := x rounded toward zero; lanes at or above the threshold (and NaNs) pass through x.
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vrndx${ABC[N:N+4]} = vbslq_f32(vrndmask${ABC[N:N+4]}, vprerndx${ABC[N:N+4]}, vx${ABC[N:N+4]});

    // Lanes where truncation decreased the value need a +1 adjustment to round up.
    $for N in range(0, BATCH_TILE, 4):
      uint32x4_t vadjmask${ABC[N:N+4]} = vcgeq_f32(vrndx${ABC[N:N+4]}, vx${ABC[N:N+4]});

    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vadjrndx${ABC[N:N+4]} = vaddq_f32(vrndx${ABC[N:N+4]}, vone);

    // Set the sign bit of the mask so the sign of the result always comes from rndx.
    $for N in range(0, BATCH_TILE, 4):
      vadjmask${ABC[N:N+4]} = vorrq_u32(vadjmask${ABC[N:N+4]}, vmovq_n_u32(UINT32_C(0x80000000)));

    // y := rndx where rndx >= x, otherwise rndx + 1.
    $for N in range(0, BATCH_TILE, 4):
      const float32x4_t vy${ABC[N:N+4]} = vbslq_f32(vadjmask${ABC[N:N+4]}, vrndx${ABC[N:N+4]}, vadjrndx${ABC[N:N+4]});

    $for N in range(0, BATCH_TILE, 4):
      vst1q_f32(y, vy${ABC[N:N+4]}); y += 4;
  }
  $if BATCH_TILE > 4:
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      const float32x4_t vx = vld1q_f32(x); x += 4;
      const int32x4_t vintx = vcvtq_s32_f32(vx);
      uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
      const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
      vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
      const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
      uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
      const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
      vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));
      const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);
      vst1q_f32(y, vy); y += 4;
    }
  if XNN_UNLIKELY(n != 0) {
    // Tail: load a full vector (XNN_OOB_READS permits reading past the end of the input),
    // then store only the remaining 1-3 elements.
    const float32x4_t vx = vld1q_f32(x);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
    vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}
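
// For reference, a scalar sketch of the round-up trick the NEON code above vectorizes.
// This is an illustrative sketch only, not part of the upstream kernel; the helper name
// scalar_rndu is hypothetical, and the block is guarded out so it never affects compilation.
#if 0
#include <math.h>
#include <stdint.h>

static float scalar_rndu(float x) {
  // |x| >= 2**23 is already integral (and NaN fails the comparison): pass x through,
  // matching the vcaltq_f32 threshold mask above.
  if (!(fabsf(x) < 0x1.0p+23f)) {
    return x;
  }
  // Round toward zero via an int32 round trip; copysignf keeps the sign of x
  // (e.g. -0.0f for x in (-1.0f, -0.0f]), matching the sign-bit handling done
  // with vbicq_u32/vbslq_f32 above.
  const float rndx = copysignf((float) (int32_t) x, x);
  // If truncation moved the value down, add 1 to round up, as vcgeq_f32/vaddq_f32 do.
  return rndx >= x ? rndx : rndx + 1.0f;
}
#endif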