// Auto-generated file. Do not edit!
//   Template: src/x8-lut/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>

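// Applies the 256-entry lookup table t to n bytes of x: y[i] = t[x[i]].
// The table is held in sixteen 16-byte rows that are probed with
// _mm_shuffle_epi8; the main loop processes 32 input bytes per iteration.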
void xnn_x8_lut_ukernel__avx_x32(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

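  // Load the 256-entry table as sixteen 16-byte rows (the aligned loads
  // require t to be 16-byte aligned).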
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16));
  const __m128i vt2 = _mm_load_si128((const __m128i*) (t + 32));
  const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48));
  const __m128i vt4 = _mm_load_si128((const __m128i*) (t + 64));
  const __m128i vt5 = _mm_load_si128((const __m128i*) (t + 80));
  const __m128i vt6 = _mm_load_si128((const __m128i*) (t + 96));
  const __m128i vt7 = _mm_load_si128((const __m128i*) (t + 112));
  const __m128i vt8 = _mm_load_si128((const __m128i*) (t + 128));
  const __m128i vt9 = _mm_load_si128((const __m128i*) (t + 144));
  const __m128i vtA = _mm_load_si128((const __m128i*) (t + 160));
  const __m128i vtB = _mm_load_si128((const __m128i*) (t + 176));
  const __m128i vtC = _mm_load_si128((const __m128i*) (t + 192));
  const __m128i vtD = _mm_load_si128((const __m128i*) (t + 208));
  const __m128i vtE = _mm_load_si128((const __m128i*) (t + 224));
  const __m128i vtF = _mm_load_si128((const __m128i*) (t + 240));

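  // Pre-XOR adjacent table rows: the row lookups in the loops below are
  // combined with XOR, so for an index in row r the contributions telescope
  // to the single row vtr, e.g. vt0 ^ (vt0 ^ vt1) ^ (vt1 ^ vt2) == vt2.
  // Rows 8..15 additionally fold in vtable(r-8) to cancel the extra hits
  // that indices >= 128 score in rows 0..7 while being rebased.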
  const __m128i vtable0 = vt0;
  const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
  const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
  const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
  const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
  const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
  const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
  const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
  const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
  const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
  const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
  const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
  const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
  const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
  const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
  const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);

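  // Each lookup step subtracts 16 from the indices and probes the next
  // table row. _mm_shuffle_epi8 zeroes output bytes whose index byte has
  // the sign bit set, so a row only contributes where the rebased index is
  // non-negative.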
  const __m128i voffset = _mm_set1_epi8(16);
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

    __m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
    __m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);

    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));

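    // From row 9 on, switch to saturating subtraction: indices that already
    // went negative must stay negative, while a wrapping subtraction would
    // eventually carry them back into the non-negative range.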
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
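  // Process a remaining block of 16..31 bytes one vector at a time.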
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
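  // Final 1..15 bytes: compute a full vector of lookups (the load reads 16
  // bytes, of which only n are valid) and store the result piecewise.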
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

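    // Store 8, 4, 2, and finally 1 byte according to the set bits of n,
    // moving the not-yet-stored bytes down to lane 0 after each step.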
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}