// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


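// This template generates the QS8/QU8 leaky-ReLU (vlrelu) microkernels for SSE2;
// BATCH_TILE (a multiple of 16) sets how many elements the main loop handles per iteration.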
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vlrelu_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

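  // Load the kernel parameters: the input/output zero points and the two words
  // (multiplier_base, multiplier_diff) used below to select a per-element multiplier.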
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      const __m128i vx${ABC[0]} = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        const __m128i vx${ABC[N]} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};

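      // Sign-extend (QS8) or zero-extend (QU8) the 8-bit inputs to 16-bit lanes.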
      $for N in range(SIMD_TILE):
        $if DATATYPE == "QU8":
          __m128i vextx${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vzero);
          __m128i vextx${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vzero);
        $else:
          const __m128i vm${ABC[N]} = _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[N]});
          __m128i vextx${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vm${ABC[N]});
          __m128i vextx${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vm${ABC[N]});

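      // Compare each element against the input zero point (the mask drives the
      // multiplier selection below) and compute (input_zero_point - x).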
      $for N in range(2*SIMD_TILE):
        __m128i vmultiplier${ABC[N]} = _mm_cmpgt_epi16(vextx${ABC[N]}, vinput_zero_point);
        vextx${ABC[N]} = _mm_sub_epi16(vinput_zero_point, vextx${ABC[N]});

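      // Branch-free multiplier selection: ANDing the all-ones mask with multiplier_diff
      // and XORing with multiplier_base yields one multiplier for elements above the
      // input zero point and another for the rest.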
      $for N in range(2*SIMD_TILE):
        vmultiplier${ABC[N]} = _mm_and_si128(vmultiplier${ABC[N]}, vmultiplier_diff);

      $for N in range(2*SIMD_TILE):
        vmultiplier${ABC[N]} = _mm_xor_si128(vmultiplier${ABC[N]}, vmultiplier_base);

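      // Fixed-point multiply: the 32-bit product is shifted right by 8 with rounding,
      // assembled from the low half (>> 7, then rounded via _mm_avg_epu16 with zero)
      // plus the high half (<< 8).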
      $for N in range(2*SIMD_TILE):
        __m128i vprodlo${ABC[N]} = _mm_mullo_epi16(vextx${ABC[N]}, vmultiplier${ABC[N]});

      $for N in range(2*SIMD_TILE):
        vprodlo${ABC[N]} = _mm_srli_epi16(vprodlo${ABC[N]}, 7);
        __m128i vprodhi${ABC[N]} = _mm_mulhi_epi16(vextx${ABC[N]}, vmultiplier${ABC[N]});

      $for N in range(2*SIMD_TILE):
        vprodhi${ABC[N]} = _mm_slli_epi16(vprodhi${ABC[N]}, 8);
        vprodlo${ABC[N]} = _mm_avg_epu16(vprodlo${ABC[N]}, vzero);

      $for N in range(2*SIMD_TILE):
        __m128i vacc${ABC[N]} = _mm_add_epi16(vprodlo${ABC[N]}, vprodhi${ABC[N]});

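      // Re-add the output zero point with signed saturation, then pack back to 8 bits
      // (signed saturation for QS8, unsigned for QU8).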
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_adds_epi16(vacc${ABC[N]}, voutput_zero_point);

      $for N in range(SIMD_TILE):
        const __m128i vy${ABC[N]} = ${_MM_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

      _mm_storeu_si128((__m128i*) y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
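  // Process one block of 16 elements at a time.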
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    $if DATATYPE == "QU8":
      __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
      __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
      __m128i vextx1 = _mm_unpackhi_epi8(vx, vm);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    const __m128i vy = ${_MM_PACKXS_EPI16}(vacc0, vacc1);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
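  // Handle the final 1..15 elements, if any.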
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

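    // Load a full 16-byte vector even though only n bytes are valid: the kernel is
    // annotated XNN_OOB_READS, so reading past the end of x is permitted.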
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    $if DATATYPE == "QU8":
      __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
      __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
      __m128i vextx1 = _mm_unpackhi_epi8(vx, vm);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    __m128i vy = ${_MM_PACKXS_EPI16}(vacc0, vacc1);
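    // Store the remaining bytes in progressively smaller chunks: 8, 4, 2, then 1.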
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(${XINT8_T}))) {
      unaligned_store_u16(y, (uint16_t) vy0);
      vy0 >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) vy0;
    }
  }
}