// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>

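// Depthwise convolution (dwconv) microkernel for F32 with fused min/max
// output clamping, specialized for AVX. Per XNNPACK's naming convention,
// "up16x9" denotes a unipass kernel with a channel tile of 16 and 9 kernel
// elements (e.g. a 3x3 convolution window).
//
// Illustrative invocation (a sketch, not part of this file; the argument
// values, buffer names, and the exact packing of `weights` and `params`
// are assumptions that must match XNNPACK's packing/initialization code):
//
//   xnn_f32_dwconv_minmax_ukernel_up16x9__avx(
//       channels, output_width,
//       indirection_buffer,  // per output pixel: 9 input-row pointers
//       packed_weights,      // bias + 9 taps, packed in 16-channel groups
//       output,
//       9 * sizeof(void*),   // input_stride between pointer groups (assumed)
//       0,                   // output_increment
//       0,                   // input_offset
//       zero_buffer,         // shared zero row used for padding
//       &params);            // min/max bounds and the tail mask table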
void xnn_f32_dwconv_minmax_ukernel_up16x9__avx(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

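  // Load the output clamping bounds; each is stored as 8 packed floats
  // (presumably the same scalar replicated) in the AVX params block.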
  const __m256 vmax = _mm256_load_ps(params->avx.max);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
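  // Outer loop: each iteration computes all `channels` outputs for one
  // output pixel, then advances to the next group of input pointers.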
  do {
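    // Fetch the 9 input-row pointers (one per kernel tap) for this output
    // pixel. A pointer equal to the shared `zero` buffer marks implicit
    // padding and is deliberately left without the input_offset adjustment.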
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
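    // Main loop: 16 channels per iteration. The fixed load offsets below
    // (w, w+16, ..., w+144; advance by 160) show the weight packing: each
    // 16-channel group is laid out as [bias(16), k0(16), k1(16), ..., k8(16)].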
    for (; c >= 16; c -= 16) {
      __m256 vacc01234567p0 = _mm256_load_ps(w);
      __m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);

      const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
      const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
      i0 += 16;

      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi0x89ABCDEF, vk0x89ABCDEF));

      const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
      const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
      i1 += 16;

      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF));

      const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
      const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
      i2 += 16;

      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi2x89ABCDEF, vk2x89ABCDEF));

      const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
      const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
      i3 += 16;

      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi3x89ABCDEF, vk3x89ABCDEF));

      const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
      const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
      i4 += 16;

      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi4x89ABCDEF, vk4x89ABCDEF));

      const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
      const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
      i5 += 16;

      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi5x89ABCDEF, vk5x89ABCDEF));

      const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
      const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
      i6 += 16;

      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi6x89ABCDEF, vk6x89ABCDEF));

      const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
      const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
      i7 += 16;

      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi7x89ABCDEF, vk7x89ABCDEF));

      const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
      const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
      i8 += 16;

      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, _mm256_mul_ps(vi8x89ABCDEF, vk8x89ABCDEF));

      w += 160;

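      // Clamp the accumulated sums into the [min, max] output range.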
      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      __m256 vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEFp0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);
      vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vmax);

      _mm256_storeu_ps(output, vacc01234567);
      _mm256_storeu_ps(output + 8, vacc89ABCDEF);
      output += 16;
    }
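    // Remainder: one group of 8 channels. The tap offsets (w+16 ... w+144)
    // still follow the 16-channel group layout, so `w` advances by only 8,
    // leaving the second half of the group for the tail below.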
    for (; c >= 8; c -= 8) {
      __m256 vacc01234567p0 = _mm256_load_ps(w);

      const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
      i0 += 8;

      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));

      const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
      i1 += 8;

      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));

      const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
      i2 += 8;

      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));

      const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
      i3 += 8;

      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));

      const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
      i4 += 8;

      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));

      const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
      i5 += 8;

      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));

      const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
      i6 += 8;

      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));

      const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
      i7 += 8;

      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));

      const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
      i8 += 8;

      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));

      w += 8;

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      _mm256_storeu_ps(output, vacc01234567);
      output += 8;
    }
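    // Channel tail (1-7 channels left): AVX masked loads read only the valid
    // lanes. The mask table is expected to be laid out so that entry
    // [7 - c] begins a window of c all-ones lanes followed by zero lanes.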
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);

      __m256 vacc01234567p0 = _mm256_load_ps(w);

      const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi0x01234567, vk0x01234567));

      const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi1x01234567, vk1x01234567));

      const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi2x01234567, vk2x01234567));

      const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi3x01234567, vk3x01234567));

      const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi4x01234567, vk4x01234567));

      const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi5x01234567, vk5x01234567));

      const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi6x01234567, vk6x01234567));

      const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi7x01234567, vk7x01234567));

      const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, _mm256_mul_ps(vi8x01234567, vk8x01234567));

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

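      // Store the c valid output lanes in 4-, 2-, and 1-element pieces.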
      __m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
      if (c & 4) {
        _mm_storeu_ps(output, vacc0123);
        vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
        output += 4;
      }
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}