// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-neon-mul8.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>

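// QS8 depthwise convolution, unipass: channel tile of 16 ("up16"), 9 kernel
// taps ("x9"), rndnu requantization, NEON 8-bit multiply-accumulate with
// 128-bit vector loads ("mla8_ld128").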
void xnn_qs8_dwconv_minmax_rndnu_ukernel_up16x9__neon_mla8_ld128(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
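  // Each outer iteration produces one output pixel across all channels:
  // gather the 9 input row pointers for this pixel (offsetting all but the
  // shared zero buffer), accumulate, requantize, and store, then step the
  // indirection buffer by input_stride.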
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
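    // Main loop: 16 channels per iteration. The packed weights interleave
    // 16 int32 biases followed by nine rows of 16 int8 kernel taps.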
    for (; c >= 16; c -= 16) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

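      // Taps are processed in pairs: vmull_s8 starts a 16-bit product,
      // vmlal_s8 adds the next tap's product, and vaddw_s16 then widens the
      // pair into the 32-bit accumulators.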
      const int8x16_t vi0x0123456789ABCDEF = vld1q_s8(i0); i0 += 16;
      const int8x16_t vk0x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      int16x8_t vprod01234567 = vmull_s8(vget_low_s8(vi0x0123456789ABCDEF), vget_low_s8(vk0x0123456789ABCDEF));
      int16x8_t vprod89ABCDEF = vmull_s8(vget_high_s8(vi0x0123456789ABCDEF), vget_high_s8(vk0x0123456789ABCDEF));

      const int8x16_t vi1x0123456789ABCDEF = vld1q_s8(i1); i1 += 16;
      const int8x16_t vk1x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi1x0123456789ABCDEF), vget_low_s8(vk1x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi1x0123456789ABCDEF), vget_high_s8(vk1x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi2x0123456789ABCDEF = vld1q_s8(i2); i2 += 16;
      const int8x16_t vk2x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmull_s8(vget_low_s8(vi2x0123456789ABCDEF), vget_low_s8(vk2x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi2x0123456789ABCDEF), vget_high_s8(vk2x0123456789ABCDEF));

      const int8x16_t vi3x0123456789ABCDEF = vld1q_s8(i3); i3 += 16;
      const int8x16_t vk3x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi3x0123456789ABCDEF), vget_low_s8(vk3x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi3x0123456789ABCDEF), vget_high_s8(vk3x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi4x0123456789ABCDEF = vld1q_s8(i4); i4 += 16;
      const int8x16_t vk4x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmull_s8(vget_low_s8(vi4x0123456789ABCDEF), vget_low_s8(vk4x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi4x0123456789ABCDEF), vget_high_s8(vk4x0123456789ABCDEF));

      const int8x16_t vi5x0123456789ABCDEF = vld1q_s8(i5); i5 += 16;
      const int8x16_t vk5x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi5x0123456789ABCDEF), vget_low_s8(vk5x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi5x0123456789ABCDEF), vget_high_s8(vk5x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi6x0123456789ABCDEF = vld1q_s8(i6); i6 += 16;
      const int8x16_t vk6x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmull_s8(vget_low_s8(vi6x0123456789ABCDEF), vget_low_s8(vk6x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi6x0123456789ABCDEF), vget_high_s8(vk6x0123456789ABCDEF));

      const int8x16_t vi7x0123456789ABCDEF = vld1q_s8(i7); i7 += 16;
      const int8x16_t vk7x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmlal_s8(vprod01234567, vget_low_s8(vi7x0123456789ABCDEF), vget_low_s8(vk7x0123456789ABCDEF));
      vprod89ABCDEF = vmlal_s8(vprod89ABCDEF, vget_high_s8(vi7x0123456789ABCDEF), vget_high_s8(vk7x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));
      const int8x16_t vi8x0123456789ABCDEF = vld1q_s8(i8); i8 += 16;
      const int8x16_t vk8x0123456789ABCDEF = vld1q_s8(w); w = (const void*) ((const int8_t*) w + 16);

      vprod01234567 = vmull_s8(vget_low_s8(vi8x0123456789ABCDEF), vget_low_s8(vk8x0123456789ABCDEF));
      vprod89ABCDEF = vmull_s8(vget_high_s8(vi8x0123456789ABCDEF), vget_high_s8(vk8x0123456789ABCDEF));

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vprod89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vprod89ABCDEF));

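      // rndnu requantization: saturating pre-shift (vqshl), doubling
      // high-half multiply by the fixed-point multiplier (vqdmulh), then a
      // rounding post-shift (vrshl).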
      vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vqshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vqshlq_s32(vaccCDEF, vright_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);

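      // Narrow int32 accumulators to int16 with saturation, add the output
      // zero point, then narrow to int8 (AArch64 uses vqmovn_high to avoid a
      // separate vcombine).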
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
      vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif  // !XNN_ARCH_ARM64

      vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);

      vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

      vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    }
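    // Remainder: 1..15 leftover channels, handled 8 at a time. k skips the
    // 16 int32 biases of the (padded) channel tile to reach the packed int8
    // kernel taps; subsequent tap rows sit at fixed 16-byte strides from it.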
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
        const int8x8_t vk0x01234567 = vld1_s8(k); k += 8;

        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);

        const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
        const int8x8_t vk1x01234567 = vld1_s8((const void*) (k + 8));

        vprod01234567 = vmlal_s8(vprod01234567, vi1x01234567, vk1x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
        const int8x8_t vk2x01234567 = vld1_s8((const void*) (k + 24));

        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);

        const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
        const int8x8_t vk3x01234567 = vld1_s8((const void*) (k + 40));

        vprod01234567 = vmlal_s8(vprod01234567, vi3x01234567, vk3x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
        const int8x8_t vk4x01234567 = vld1_s8((const void*) (k + 56));

        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);

        const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
        const int8x8_t vk5x01234567 = vld1_s8((const void*) (k + 72));

        vprod01234567 = vmlal_s8(vprod01234567, vi5x01234567, vk5x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
        const int8x8_t vk6x01234567 = vld1_s8((const void*) (k + 88));

        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);

        const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
        const int8x8_t vk7x01234567 = vld1_s8((const void*) (k + 104));

        vprod01234567 = vmlal_s8(vprod01234567, vi7x01234567, vk7x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
        const int8x8_t vk8x01234567 = vld1_s8((const void*) (k + 120));

        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        vacc0123 = vqshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vqshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
        vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

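        // Store 8 outputs when available; otherwise write the final 1..7
        // bytes as 4/2/1-lane pieces, rotating the vector after each piece.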
        if XNN_LIKELY(c >= 8) {
          vst1_s8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
            vout01234567 = vext_s8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
            vout01234567 = vext_s8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_s8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

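    // Advance to the next output pixel; output_increment covers any gap
    // between the channels just written and the output pixel stride.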
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}