// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>


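// Depthwise convolution microkernel: unipass ("up"), 8 channels per main-loop
// iteration, 9 kernel taps, QU8 data with fp32 requantization, SSE4.1 mul32 path.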
void xnn_qu8_dwconv_minmax_fp32_ukernel_up8x9__sse41_mul32(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m128i vk_zero_point = _mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i*) params->fp32_sse2.kernel_zero_point));
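  // vk_zero_point holds the kernel zero point widened to four 32-bit lanes; it is
  // subtracted from every widened uint8 weight below.
  // Outer loop: one iteration per output pixel.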
  do {
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
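    // Main loop: process 8 channels per iteration. Packed weight layout per
    // 8-channel group: 8 int32 biases followed by 9 rows of 8 uint8 kernel taps.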
    for (; c >= 8; c -= 8) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));


      const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi0x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(uint8_t))))), vk_zero_point);
      i0 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));

      const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi1x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(uint8_t))))), vk_zero_point);
      i1 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));

      const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi2x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(uint8_t))))), vk_zero_point);
      i2 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));

      const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi3x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(uint8_t))))), vk_zero_point);
      i3 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));

      const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi4x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(uint8_t))))), vk_zero_point);
      i4 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));

      const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi5x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(uint8_t))))), vk_zero_point);
      i5 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));

      const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi6x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(uint8_t))))), vk_zero_point);
      i6 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));

      const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi7x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(uint8_t))))), vk_zero_point);
      i7 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));

      const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi8x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(uint8_t))))), vk_zero_point);
      i8 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t));

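      // fp32 requantization: scale the int32 accumulators in float, clamp to the
      // output max, round back to int32, add the output zero point while packing
      // to 16 bits, then saturate to uint8 and clamp to the output min.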
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
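    // Remainder: handle the last 1-7 channels in groups of up to 4, keeping the
    // 8-byte per-tap row stride of the packed weights.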
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 8);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) k))), vk_zero_point);
        i0 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
        const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 8)))), vk_zero_point);
        i1 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
        const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16)))), vk_zero_point);
        i2 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
        const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24)))), vk_zero_point);
        i3 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
        const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32)))), vk_zero_point);
        i4 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
        const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 40)))), vk_zero_point);
        i5 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
        const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48)))), vk_zero_point);
        i6 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
        const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 56)))), vk_zero_point);
        i7 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
        const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64)))), vk_zero_point);
        i8 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));

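        // Advance k by only 4 bytes: the k + 8*n offsets above already step over
        // the full 8-byte per-tap rows of the packed weights.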
        k += 4;

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        vscaled0123 = _mm_mul_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.scale));
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packus_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epu8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

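        // Store 4 output bytes while at least 4 channels remain; otherwise write
        // the final 1-3 bytes with 16-bit and 8-bit partial stores.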
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (uint8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}