// Auto-generated file. Do not edit!
//   Template: src/x8-lut/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/lut.h>
#include <xnnpack/unaligned.h>


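// Lookup-table (LUT) microkernel: computes y[i] = t[x[i]] for n bytes, 16 bytes per iteration,
// using SSSE3 PSHUFB (_mm_shuffle_epi8) to index the 256-entry table 16 bytes at a time.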
void xnn_x8_lut_ukernel__ssse3_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

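  // Load the 256-entry lookup table as sixteen 16-byte rows.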
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16));
  const __m128i vt2 = _mm_load_si128((const __m128i*) (t + 32));
  const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48));
  const __m128i vt4 = _mm_load_si128((const __m128i*) (t + 64));
  const __m128i vt5 = _mm_load_si128((const __m128i*) (t + 80));
  const __m128i vt6 = _mm_load_si128((const __m128i*) (t + 96));
  const __m128i vt7 = _mm_load_si128((const __m128i*) (t + 112));
  const __m128i vt8 = _mm_load_si128((const __m128i*) (t + 128));
  const __m128i vt9 = _mm_load_si128((const __m128i*) (t + 144));
  const __m128i vtA = _mm_load_si128((const __m128i*) (t + 160));
  const __m128i vtB = _mm_load_si128((const __m128i*) (t + 176));
  const __m128i vtC = _mm_load_si128((const __m128i*) (t + 192));
  const __m128i vtD = _mm_load_si128((const __m128i*) (t + 208));
  const __m128i vtE = _mm_load_si128((const __m128i*) (t + 224));
  const __m128i vtF = _mm_load_si128((const __m128i*) (t + 240));

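  // PSHUFB can only index a 16-byte register, and it zeroes any lane whose index byte has the
  // sign bit set; otherwise it uses the low 4 bits of the index. The rows are therefore pre-XORed
  // so that the successive row lookups below telescope, leaving exactly the table entry that
  // matches each input byte. Rows 8-15 additionally fold in rows 0-7 so the telescoping also
  // holds for inputs >= 128, whose index bytes wrap from negative back into the positive range
  // over the course of the lookups.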
  const __m128i vtable0 = vt0;
  const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
  const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
  const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
  const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
  const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
  const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
  const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
  const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
  const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
  const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
  const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
  const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
  const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
  const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
  const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);

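  // Main loop: every index is reduced by 16 before each successive row lookup; lanes whose index
  // is out of range for a row produce zero from PSHUFB, and XOR accumulates the surviving row
  // contributions into the result.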
  const __m128i voffset = _mm_set1_epi8(16);
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

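    // The first eight decrements above use wrapping subtraction so that indices >= 128 can wrap
    // into the positive range; the remaining seven saturate so that indices that have already
    // gone negative stay negative and keep yielding zeros.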
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
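  // Handle the final 1-15 bytes: a full 16-byte vector is loaded and translated, and only the
  // low n bytes of the result are stored.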
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

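    // Store the remaining bytes in 8-, 4-, 2-, and 1-byte pieces according to the bits of n.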
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}