// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <wasm_simd128.h>

#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>

xnn_qu8_requantize_gemmlowp__wasmsimd(size_t n,const int32_t * input,float scale,uint8_t zero_point,uint8_t qmin,uint8_t qmax,uint8_t * output)19 void xnn_qu8_requantize_gemmlowp__wasmsimd(
20 size_t n,
21 const int32_t* input,
22 float scale,
23 uint8_t zero_point,
24 uint8_t qmin,
25 uint8_t qmax,
26 uint8_t* output)
27 {
28 assert(n % 16 == 0);
29 assert(scale < 1.0f);
30 assert(scale >= 0x1.0p-32f);
31
32 // Compute requantization parameters.
33 const uint32_t scale_bits = float_as_uint32(scale);
34
35 // Multiplier is in [0x40000000, 0x7FFFFF80] range.
36 const int32_t multiplier = (int32_t) (((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
37 assert(multiplier >= INT32_C(0x40000000));
38 assert(multiplier <= INT32_C(0x7FFFFF80));
39 const int64_t twice_multiplier = INT64_C(2) * (int64_t) multiplier;
40
41 // Shift is in [0, 31] range.
42 const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
43 assert(shift >= 0);
44 assert(shift < 32);
45
46 const v128_t vmultiplier = wasm_i64x2_make(twice_multiplier, twice_multiplier);
47 const v128_t vzero_point = wasm_i16x8_splat((int16_t) (uint16_t) zero_point);
48
49 const v128_t vqmin = wasm_i8x16_splat((int8_t) qmin);
50 const v128_t vqmax = wasm_i8x16_splat((int8_t) qmax);
51 const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
52 const v128_t vremainder_mask = wasm_i32x4_splat((int32_t) remainder_mask);
53 const v128_t vthreshold = wasm_i32x4_splat((int32_t) (remainder_mask >> 1));
54 const v128_t vtwice_q31rounding = wasm_i64x2_splat(INT64_C(0x80000000));
55 for (; n != 0; n -= 16) {
56 const v128_t x = wasm_v128_load(input);
57 const v128_t y = wasm_v128_load(input + 4);
58 const v128_t z = wasm_v128_load(input + 8);
59 const v128_t w = wasm_v128_load(input + 12);
60 input += 16;
61
62 const v128_t x_sign = wasm_i32x4_shr(x, 31);
63 const v128_t y_sign = wasm_i32x4_shr(y, 31);
64 const v128_t z_sign = wasm_i32x4_shr(z, 31);
65 const v128_t w_sign = wasm_i32x4_shr(w, 31);
66
67 const v128_t x_lo = wasm_v32x4_shuffle(x, x_sign, 0, 4, 1, 5);
68 const v128_t y_lo = wasm_v32x4_shuffle(y, y_sign, 0, 4, 1, 5);
69 const v128_t z_lo = wasm_v32x4_shuffle(z, z_sign, 0, 4, 1, 5);
70 const v128_t w_lo = wasm_v32x4_shuffle(w, w_sign, 0, 4, 1, 5);
71
72 const v128_t x_hi = wasm_v32x4_shuffle(x, x_sign, 2, 6, 3, 7);
73 const v128_t y_hi = wasm_v32x4_shuffle(y, y_sign, 2, 6, 3, 7);
74 const v128_t z_hi = wasm_v32x4_shuffle(z, z_sign, 2, 6, 3, 7);
75 const v128_t w_hi = wasm_v32x4_shuffle(w, w_sign, 2, 6, 3, 7);
76
77 const v128_t x_product_lo = wasm_i64x2_add(wasm_i64x2_mul(x_lo, vmultiplier), vtwice_q31rounding);
78 const v128_t y_product_lo = wasm_i64x2_add(wasm_i64x2_mul(y_lo, vmultiplier), vtwice_q31rounding);
79 const v128_t z_product_lo = wasm_i64x2_add(wasm_i64x2_mul(z_lo, vmultiplier), vtwice_q31rounding);
80 const v128_t w_product_lo = wasm_i64x2_add(wasm_i64x2_mul(w_lo, vmultiplier), vtwice_q31rounding);
81
82 const v128_t x_product_hi = wasm_i64x2_add(wasm_i64x2_mul(x_hi, vmultiplier), vtwice_q31rounding);
83 const v128_t y_product_hi = wasm_i64x2_add(wasm_i64x2_mul(y_hi, vmultiplier), vtwice_q31rounding);
84 const v128_t z_product_hi = wasm_i64x2_add(wasm_i64x2_mul(z_hi, vmultiplier), vtwice_q31rounding);
85 const v128_t w_product_hi = wasm_i64x2_add(wasm_i64x2_mul(w_hi, vmultiplier), vtwice_q31rounding);
86
87 const v128_t x_q31product = wasm_v32x4_shuffle(x_product_lo, x_product_hi, 1, 3, 5, 7);
88 const v128_t y_q31product = wasm_v32x4_shuffle(y_product_lo, y_product_hi, 1, 3, 5, 7);
89 const v128_t z_q31product = wasm_v32x4_shuffle(z_product_lo, z_product_hi, 1, 3, 5, 7);
90 const v128_t w_q31product = wasm_v32x4_shuffle(w_product_lo, w_product_hi, 1, 3, 5, 7);
91
92 const v128_t x_remainder =
93 wasm_i32x4_add(wasm_v128_and(x_q31product, vremainder_mask), wasm_i32x4_shr(x_q31product, 31));
94 const v128_t y_remainder =
95 wasm_i32x4_add(wasm_v128_and(y_q31product, vremainder_mask), wasm_i32x4_shr(y_q31product, 31));
96 const v128_t z_remainder =
97 wasm_i32x4_add(wasm_v128_and(z_q31product, vremainder_mask), wasm_i32x4_shr(z_q31product, 31));
98 const v128_t w_remainder =
99 wasm_i32x4_add(wasm_v128_and(w_q31product, vremainder_mask), wasm_i32x4_shr(w_q31product, 31));
100
101 const v128_t x_scaled =
102 wasm_i32x4_sub(wasm_i32x4_shr(x_q31product, shift), wasm_i32x4_gt(x_remainder, vthreshold));
103 const v128_t y_scaled =
104 wasm_i32x4_sub(wasm_i32x4_shr(y_q31product, shift), wasm_i32x4_gt(y_remainder, vthreshold));
105 const v128_t z_scaled =
106 wasm_i32x4_sub(wasm_i32x4_shr(z_q31product, shift), wasm_i32x4_gt(z_remainder, vthreshold));
107 const v128_t w_scaled =
108 wasm_i32x4_sub(wasm_i32x4_shr(w_q31product, shift), wasm_i32x4_gt(w_remainder, vthreshold));
109
110 const v128_t xy_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(x_scaled, y_scaled), vzero_point);
111 const v128_t zw_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(z_scaled, w_scaled), vzero_point);
112 const v128_t xyzw_packed = wasm_u8x16_narrow_i16x8(xy_packed, zw_packed);
113 const v128_t xyzw_clamped = wasm_u8x16_min(wasm_u8x16_max(xyzw_packed, vqmin), vqmax);
114
115 // 12x v128.shuffle
116 // 8x i32x4.lt
117 // 8x i64x2.add
118 // 8x i64x2.mul
119 // 4x v128.and
120 // 4x i32x4.add
121 // 4x i32x4.sub
122 // 4x i32x4.gt
123 // 4x i32x4.shr_s
124 // 2x i16x8.narrow_i32x4_s
125 // 2x i16x8.add_saturate_s
126 // 1x i8x16.narrow_i16x8_u
127 // 1x i8x16.max_u
128 // 1x i8x16.min_u
129 // ---------------------
130 // 63 instructions total
131
132 wasm_v128_store(output, xyzw_clamped);
133 output += 16;
134 }
135 }