// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/requantization-stubs.h>


void xnn_qu8_requantize_fp32__neon(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output)
{
  assert(n % 16 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);
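  // A rough scalar sketch of the per-element computation below, for reference only (not part of the kernel;
  // lrintf() from <math.h> stands in for the vector round-to-nearest-even conversion, and intermediate
  // saturation is ignored):
  //   const float scaled = (float) input[i] * scale;
  //   const long rounded = lrintf(scaled) + (long) zero_point;
  //   output[i] = (uint8_t) (rounded < qmin ? qmin : rounded > qmax ? qmax : rounded);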

  const float32x4_t vscale = vdupq_n_f32(scale);
#ifdef __aarch64__
  const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
  const uint8x16_t vqmin = vdupq_n_u8(qmin);
  const uint8x16_t vqmax = vdupq_n_u8(qmax);
#else
  const float32x4_t vfmin = vdupq_n_f32((float) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point));
  const float32x4_t vfmax = vdupq_n_f32((float) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point));
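  // Note on the two constants below: 12582912.0f is 0x1.8p+23f (1.5 * 2**23), whose IEEE-754 bit pattern is
  // 0x4B400000. Adding it to a float of magnitude below 2**22 forces the rounded integer into the low mantissa
  // bits, so reinterpreting the sum as int32_t and subtracting 0x4B400000 recovers round(x); folding zero_point
  // into the subtracted constant adds the zero point at no extra cost. See the comments in the loop below.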
  const float32x4_t vfmagic = vdupq_n_f32(12582912.0f);
  const int32x4_t vimagic = vdupq_n_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point);
#endif
  for (; n != 0; n -= 16) {
    const int32x4_t x = vld1q_s32(input);
    const int32x4_t y = vld1q_s32(input + 4);
    const int32x4_t z = vld1q_s32(input + 8);
    const int32x4_t w = vld1q_s32(input + 12);
    input += 16;

    // Convert int32_t input to FP32 and multiply by the FP32 scale.
    // Both operations involve statistically unbiased roundings:
    // - Large int32_t values can't be exactly represented as FP32. The conversion instruction in ARM NEON
    //   rounds them to the nearest FP32 value with ties to even.
    // - The product of two FP32 values is generally not exactly representable as an FP32 value, and is rounded
    //   to the nearest FP32 value with ties to even.
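    // For example, 16777217 (2**24 + 1) has no exact FP32 representation and converts to 16777216.0f under
    // round-to-nearest-even; the subsequent multiply by vscale rounds in the same way.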
    const float32x4_t x_scaled = vmulq_f32(vcvtq_f32_s32(x), vscale);
    const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale);
    const float32x4_t z_scaled = vmulq_f32(vcvtq_f32_s32(z), vscale);
    const float32x4_t w_scaled = vmulq_f32(vcvtq_f32_s32(w), vscale);

#ifdef __aarch64__
    // Leverage the "Floating-point Convert to Signed integer, rounding to nearest with ties to even" instruction.
    // This is an ARMv8 instruction (always available in AArch64), which saturates the result on overflow.
    // We don't need to specifically consider saturated results: they will be clamped at the last stage.
    const int32x4_t x_rounded = vcvtnq_s32_f32(x_scaled);
    const int32x4_t y_rounded = vcvtnq_s32_f32(y_scaled);
    const int32x4_t z_rounded = vcvtnq_s32_f32(z_scaled);
    const int32x4_t w_rounded = vcvtnq_s32_f32(w_scaled);

    // Standard final sequence on ARM NEON:
    // - Pack to int16_t and saturate
    // - Add zero point
    // - Pack to uint8_t and saturate
    // - Clamp between qmin and qmax
    const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_rounded), y_rounded), vzero_point);
    const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_rounded), w_rounded), vzero_point);
    const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
    const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
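    // Illustrative example: with zero_point = 128 (a hypothetical value), a rounded lane of -200 narrows to
    // int16_t as -200, becomes -72 after the saturating zero-point add, saturates to 0 in the unsigned
    // narrowing, and is finally clamped into [qmin, qmax].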

    vst1q_u8(output, xyzw_clamped);
    output += 16;
#else
    // ARMv7 NEON offers only a floating-point to integer conversion instruction with rounding towards zero.
    // In lieu of a conversion instruction with rounding to nearest-even, we use a magic trick of adding a large
    // number (1.5 * 2**23) to the scaled value to cause rounding to an integer, and then subtracting this magic
    // number as an integer. This trick works only in a limited range (the absolute value of the input must be
    // less than 2**22), so generally we have to clamp the input to this range before using the magic. However,
    // clamping to any smaller range works just as well, and thus we clamp to the [qmin - zero point, qmax - zero
    // point] range so that after we add the zero point to the result, it gets into the target [qmin, qmax] range.
    const float32x4_t x_clamped = vminq_f32(vmaxq_f32(x_scaled, vfmin), vfmax);
    const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax);
    const float32x4_t z_clamped = vminq_f32(vmaxq_f32(z_scaled, vfmin), vfmax);
    const float32x4_t w_clamped = vminq_f32(vmaxq_f32(w_scaled, vfmin), vfmax);

    // Conversion to integer using the "magic trick". Rounding is performed in the output of the addition
    // operation, and the result is rounded to the nearest integer with ties to even.
    const int32x4_t x_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(x_clamped, vfmagic)), vimagic);
    const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic);
    const int32x4_t z_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(z_clamped, vfmagic)), vimagic);
    const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);
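    // Illustrative example: with zero_point = 128 (a hypothetical value) and a clamped lane of 42.75,
    // 42.75f + 12582912.0f rounds to 12582955.0f, whose bit pattern is 0x4B400000 + 43; subtracting
    // vimagic (0x4B400000 - 128) yields 43 + 128 = 171, i.e. round(42.75) + zero_point.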

    // Select the low 8 bits of each 32-bit integer in the vectors for the output.
    // Since the result is already clamped to the [qmin, qmax] subrange of [0, 255], saturation is not needed.
    const int16x8_t xy_packed = vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));
    const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
    const uint8x16_t xyzw_packed = vreinterpretq_u8_s8(vcombine_s8(vmovn_s16(xy_packed), vmovn_s16(zw_packed)));

    // AArch32 version:
    // 4x VCVT.F32.S32 Qd, Qm
    // 4x VMUL.F32 Qd, Qm, Qn
    // 4x VMIN.F32 Qd, Qm, Qn
    // 4x VMAX.F32 Qd, Qm, Qn
    // 4x VADD.F32 Qd, Qm, Qn
    // 4x VSUB.S32 Qd, Qm, Qn
    // 4x VMOVN.I32 Dd, Qm
    // 2x VMOVN.I16 Dd, Qm
    // ---------------------
    // 30 instructions total
    //
    // AArch64 version:
    // 4x SCVTF Vd.4S, Vn.4S
    // 4x FMUL Vd.4S, Vn.4S, Vm.4S
    // 4x FCVTNS Vd.4S, Vn.4S
    // 2x SQXTN Vd.4H, Vn.4S
    // 2x SQXTN2 Vd.8H, Vn.4S
    // 2x SQADD Vd.8H, Vn.8H, Vm.8H
    // 1x SQXTUN Vd.8B, Vn.8H
    // 1x SQXTUN2 Vd.16B, Vn.8H
    // 1x UMIN Vd.16B, Vn.16B, Vm.16B
    // 1x UMAX Vd.16B, Vn.16B, Vm.16B
    // ---------------------
    // 22 instructions total

    vst1q_u8(output, xyzw_packed);
    output += 16;
#endif
  }
}