// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

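// 4x16 single-precision GEMM microkernel (shuffle-4 variant) using FMA3.
// GEMMINC: accumulation starts from the partial results supplied in acc
// rather than from bias values, and the output is clamped to the min/max
// bounds in params.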
void xnn_f32_gemminc_minmax_ukernel_4x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

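  // Set up per-row pointers into A and C. When mr < 4, pointers for the
  // missing rows alias the previous row, so those rows are recomputed
  // harmlessly instead of branching in the inner loop.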
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

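  // Each pass over this loop produces one 4x16 tile of output columns,
  // starting from the 64 partial accumulator values supplied in acc.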
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
    __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
    acc += 64;

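    // Main loop: consume K in groups of 4 floats. Each group of 4 A values
    // per row is broadcast into both 128-bit lanes of a YMM register.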
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 += 4;
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 += 4;
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 += 4;
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 += 4;

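      // Four FMA passes (c0..c3) follow; between passes each 128-bit lane
      // of the A registers is rotated by one element, so every A value is
      // multiplied against its matching group of 16 packed weights.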
      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);

      w += 64;
      k -= 4 * sizeof(float);
    }
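    // Remainder: 1-3 K elements left. The 128-bit A loads may read past the
    // end of the row (the kernel is declared XNN_OOB_READS), so each A lane
    // is zeroed wherever the corresponding packed weight is zero; the packed
    // weights beyond the remainder are expected to be zero-padded, and the
    // masking keeps out-of-bounds garbage (possibly NaN/Inf) out of the
    // accumulators.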
    if XNN_UNLIKELY(k != 0) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 = (const float*) ((uintptr_t) a3 + k);

      const __m256 vzero = _mm256_setzero_ps();

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc3x89ABCDEF);

      w += 64;
    }

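    // Clamp the accumulated tile to the [min, max] output range from params.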
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);

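    // Store the tile. With 16 or more columns remaining, write full rows
    // (highest row first), advance C by cn_stride, and rewind the A pointers
    // by kc for the next column block; otherwise drain the nc remainder with
    // 8-, 4-, 2- and 1-element stores.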
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 8) {
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}