// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>


void xnn_qu8_requantize_rndna__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

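  // Split the floating-point scale into a 24-bit fixed-point multiplier and a right shift:
  // scale ~= multiplier * 2**(-shift), with multiplier in [2**23, 2**24) and 24 <= shift < 56.
  // For example, scale = 0.5 has biased exponent 126, so shift = 127 + 23 - 126 = 24 and
  // multiplier = 0x00800000 = 2**23, i.e. 2**23 * 2**(-24) = 0.5.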
  const uint32_t scale_bits = float_as_uint32(scale);
  const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
  const int32_t shift = 127 + 23 - (scale_bits >> 23);
  assert(shift >= 24);
  assert(shift < 56);

#if defined(__aarch64__)
  const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
  const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
  const int64x2_t vshift = vdupq_n_s64(-shift);
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;

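    // Build per-lane masks of negative inputs (all ones for negative lanes, zero otherwise).
    // They are used below to bias the products so that the rounding right shift rounds
    // halfway cases away from zero (round-to-nearest, ties away from zero).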
    const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0));
    const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0));
    const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0));
    const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0));

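    // Multiply each 32-bit input lane by the multiplier, keeping the exact 64-bit product
    // so that no precision is lost before rounding.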
#if defined(__aarch64__)
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
    const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
    const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
    const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
    const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
    const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
    const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
    const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
    const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
    const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
    const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
    const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
    const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif

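    // Widen each negative-lane mask (-1 when reinterpreted as signed) and add it to the
    // product: products of negative inputs are decremented by 1, other lanes are unchanged.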
#if defined(__aarch64__)
    const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mask));
    const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
    const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mask));
    const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mask));
    const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mask));
#else
    const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
    const int64x2_t x23_adjusted_product = vaddw_s32(x23_product, vreinterpret_s32_u32(vget_high_u32(x_neg_mask)));
    const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
    const int64x2_t y23_adjusted_product = vaddw_s32(y23_product, vreinterpret_s32_u32(vget_high_u32(y_neg_mask)));
    const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
    const int64x2_t z23_adjusted_product = vaddw_s32(z23_product, vreinterpret_s32_u32(vget_high_u32(z_neg_mask)));
    const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
    const int64x2_t w23_adjusted_product = vaddw_s32(w23_product, vreinterpret_s32_u32(vget_high_u32(w_neg_mask)));
#endif

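    // Rounding arithmetic shift right by the shift amount: VRSHL/SRSHL with a negative count
    // adds 2**(shift-1) before shifting, rounding ties towards +infinity. Together with the
    // -1 bias above this yields round-to-nearest with ties away from zero.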
    const int64x2_t x01_scaled = vrshlq_s64(x01_adjusted_product, vshift);
    const int64x2_t x23_scaled = vrshlq_s64(x23_adjusted_product, vshift);
    const int64x2_t y01_scaled = vrshlq_s64(y01_adjusted_product, vshift);
    const int64x2_t y23_scaled = vrshlq_s64(y23_adjusted_product, vshift);
    const int64x2_t z01_scaled = vrshlq_s64(z01_adjusted_product, vshift);
    const int64x2_t z23_scaled = vrshlq_s64(z23_adjusted_product, vshift);
    const int64x2_t w01_scaled = vrshlq_s64(w01_adjusted_product, vshift);
    const int64x2_t w23_scaled = vrshlq_s64(w23_adjusted_product, vshift);

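    // Narrow the 64-bit results to 32 bits (they fit, since scale < 1.0 keeps the scaled
    // values within int32 range), saturate to 16 bits, add the zero point with saturating
    // addition, then saturate to unsigned 8 bits.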
#ifdef __aarch64__
    const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
    const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
    const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
    const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
    const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
    const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
    const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
    const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
    const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));

    const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
    const uint8x16_t xyzw_packed = vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif

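    // Clamp the quantized values to the caller-provided [qmin, qmax] range.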
    const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);

    // AArch32 version:
    //   4x VCLT.S32 Qd, Qm, #0
    //   8x VMULL.S32 Qd, Dm, Dn
    //   8x VADDW.S32 Qd, Qm, Dn
    //   8x VRSHL.S64 Qd, Qm, Qn
    //   8x VMOVN.S64 Dd, Qm
    //   4x VQMOVN.S32 Dd, Qm
    //   2x VQADD.S16 Qd, Qm, Qn
    //   2x VQMOVUN.S16 Dd, Qm
    //   1x VMAX.U8 Qd, Qm, Qn
    //   1x VMIN.U8 Qd, Qm, Qn
    // ---------------------
    // 46 instructions total
    //
    // AArch64 version:
    //   4x CMLT Vd.4S, Vn.4S, #0
    //   4x SMULL Vd.2D, Vn.2S, Vm.2S
    //   4x SMULL2 Vd.2D, Vn.4S, Vm.4S
    //   4x SADDW Vd.2D, Vn.2D, Vm.2S
    //   4x SADDW2 Vd.2D, Vn.2D, Vm.4S
    //   8x SRSHL Vd.2D, Vn.2D, Vm.2D
    //   4x UZP1 Vd.4S, Vn.4S, Vm.4S
    //   2x SQXTN Vd.4H, Vn.4S
    //   2x SQXTN2 Vd.8H, Vn.4S
    //   2x SQADD Vd.8H, Vn.8H, Vm.8H
    //   1x SQXTUN Vd.8B, Vn.8H
    //   1x SQXTUN2 Vd.16B, Vn.8H
    //   1x UMIN Vd.16B, Vn.16B, Vm.16B
    //   1x UMAX Vd.16B, Vn.16B, Vm.16B
    // ---------------------
    // 42 instructions total

    vst1q_u8(output, xyzw_clamped);
    output += 16;
  }
}