// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vadd.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$WASM_X16X8_LOAD8X8 = {"QS8": "wasm_i16x8_load8x8", "QU8": "wasm_u16x8_load8x8"}[DATATYPE]
$WASM_X32X4_EXTEND_LOW_X16X8 = {"QS8": "wasm_i32x4_extend_low_i16x8", "QU8": "wasm_u32x4_extend_low_u16x8"}[DATATYPE]
$WASM_X32X4_EXTEND_HIGH_X16X8 = {"QS8": "wasm_i32x4_extend_high_i16x8", "QU8": "wasm_u32x4_extend_high_u16x8"}[DATATYPE]
$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
$WASM_X8X16_MIN = {"QS8": "wasm_i8x16_min", "QU8": "wasm_u8x16_min"}[DATATYPE]
$WASM_X8X16_MAX = {"QS8": "wasm_i8x16_max", "QU8": "wasm_u8x16_max"}[DATATYPE]
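// Quantized elementwise addition with requantization. The per-element data flow
// implemented below is, roughly:
//   acc = bias + (int32_t) a * a_multiplier + (int32_t) b * b_multiplier
//   out = clamp(sat8(sat16(acc >> shift) + output_zero_point), output_min, output_max)
// where the bias, the multipliers, the (arithmetic) shift, and the output
// zero point / min / max are precomputed by the caller and read from params.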
void xnn_${DATATYPE.lower()}_vadd_minmax_ukernel__wasmsimd_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
  const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
  const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
  const uint32_t vshift = params->wasmsimd.shift;
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);

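  // Main loop: process ${BATCH_TILE} elements per iteration. The 8-bit inputs are
  // widened to 16 bits on load, and to 32 bits for the multiply-accumulate below.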
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
    const v128_t vb${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_b);
    $for N in range(8, BATCH_TILE, 8):
      const v128_t va${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_a + ${N});
      const v128_t vb${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_b + ${N});
    input_a += ${BATCH_TILE};
    input_b += ${BATCH_TILE};

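    // Accumulate in 32 bits: vacc = bias + a * a_multiplier + b * b_multiplier,
    // computed separately for the low and high halves of each 16-bit vector.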
    $for N in range(0, BATCH_TILE, 8):
      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[N:N+8]}), va_multiplier));
      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[N:N+8]}), va_multiplier));

    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+4]} = wasm_i32x4_add(vacc${ABC[N:N+4]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(vb${ABC[N:N+8]}), vb_multiplier));
      vacc${ABC[N+4:N+8]} = wasm_i32x4_add(vacc${ABC[N+4:N+8]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(vb${ABC[N:N+8]}), vb_multiplier));

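    // Complete the fixed-point rescale with an arithmetic right shift.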
    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = wasm_i32x4_shr(vacc${ABC[N:N+4]}, vshift);

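    // Narrow to 16 bits with signed saturation and add the output zero point.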
    $for N in range(0, BATCH_TILE, 8):
      v128_t vout${ABC[N:N+8]} = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);

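    // Pack to 8 bits with saturation, then clamp to [output_min, output_max].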
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        v128_t vout${ABC[N:N+16]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
      $else:
        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+16]}, voutput_min);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MAX}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+16]}, voutput_max);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);

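    // Store ${BATCH_TILE} output bytes: full 16-byte vectors where available, and a
    // trailing 8-byte group (when BATCH_TILE is not a multiple of 16) via a 64-bit lane extract.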
    $if BATCH_TILE >= 16:
      wasm_v128_store(output, vout${ABC[0:16]});
    $else:
      *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        wasm_v128_store(output + ${N}, vout${ABC[N:N+16]});
      $else:
        *((double*) (output + ${N})) = wasm_f64x2_extract_lane(vout${ABC[N:N+8]}${ABC[N:N+8]}, 0);
    output += ${BATCH_TILE};
  }
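  // Remainder: handle the last few elements (fewer than ${BATCH_TILE}), one group of up
  // to 8 at a time. The full 8-byte loads may read past the end of the inputs, which
  // the XNN_OOB_READS annotation on this kernel permits.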
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
      const v128_t vb${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_b);
      $if BATCH_TILE > 8:
        input_a += 8;
        input_b += 8;

      v128_t vacc${ABC[0:4]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(va${ABC[0:8]}), va_multiplier));
      v128_t vacc${ABC[4:8]} = wasm_i32x4_add(vbias, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(va${ABC[0:8]}), va_multiplier));

      vacc${ABC[0:4]} = wasm_i32x4_add(vacc${ABC[0:4]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_LOW_X16X8}(vb${ABC[0:8]}), vb_multiplier));
      vacc${ABC[4:8]} = wasm_i32x4_add(vacc${ABC[4:8]}, wasm_i32x4_mul(${WASM_X32X4_EXTEND_HIGH_X16X8}(vb${ABC[0:8]}), vb_multiplier));

      vacc${ABC[0:4]} = wasm_i32x4_shr(vacc${ABC[0:4]}, vshift);
      vacc${ABC[4:8]} = wasm_i32x4_shr(vacc${ABC[4:8]}, vshift);

      v128_t vout${ABC[0:8]} = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

      v128_t vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[0:8]}, vout${ABC[0:8]});
      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MAX}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MIN}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);

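      // Store the tail: when BATCH_TILE > 8, write full 8-byte groups while at least
      // 8 elements remain; the final partial group is written 4, 2, and 1 bytes at a
      // time, as selected by the low bits of the remaining byte count n.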
      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
          *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          output += 8;
          n -= 8 * sizeof(${XINT8_T});
        } else {
          if (n & (4 * sizeof(${XINT8_T}))) {
            *((float*) output) = (float) wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          if (n & (2 * sizeof(${XINT8_T}))) {
            *((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
            vout${ABC[0:4]} >>= 16;
            output += 2;
          }
          if (n & (1 * sizeof(${XINT8_T}))) {
            *output = (${XINT8_T}) vout${ABC[0:4]};
          }
          n = 0;
        }
      $else:
        if (n & (4 * sizeof(${XINT8_T}))) {
          *((float*) output) = (float) wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
        if (n & (2 * sizeof(${XINT8_T}))) {
          *((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
          vout${ABC[0:4]} >>= 16;
          output += 2;
        }
        if (n & (1 * sizeof(${XINT8_T}))) {
          *output = (${XINT8_T}) vout${ABC[0:4]};
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}