// Auto-generated file. Do not edit!
//   Template: src/x32-transposec/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <immintrin.h>

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>

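// Transposes a block_height x block_width matrix of uint16_t elements from
// `input` to `output`; both strides are given in bytes. This ukernel is
// normally reached through XNNPACK's internal transpose dispatch rather than
// called directly. A minimal illustrative sketch of a direct call, using
// hypothetical caller-owned buffers whose dimensions are multiples of the
// 8x8 tile (so the XNN_OOB_READS annotation, which permits reads past the
// nominal row ends, never comes into play):
//
//   uint16_t in[16][8];   // 16 rows x 8 columns (input)
//   uint16_t out[8][16];  // 8 rows x 16 columns (transposed output)
//   xnn_x16_transposec_ukernel__8x8_reuse_switch_sse2(
//       &in[0][0], &out[0][0],
//       /*input_stride=*/8 * sizeof(uint16_t),
//       /*output_stride=*/16 * sizeof(uint16_t),
//       /*block_width=*/8, /*block_height=*/16);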
void xnn_x16_transposec_ukernel__8x8_reuse_switch_sse2(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint16_t));
  assert(input_stride >= block_width * sizeof(uint16_t));

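  // The matrix is processed in 8x8 tiles of 16-bit elements. input_reset and
  // output_reset later step the pointers from the end of one 8-column strip
  // of the input to the start of the next.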
  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint16_t);
  const size_t tile_wbytes = tile_width * sizeof(uint16_t);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);

  const uint16_t* i0 = input;
  uint16_t* o = (uint16_t*) output;
  const size_t minus_output_stride = -output_stride;

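  // Each outer iteration handles one strip of up to 8 input columns. rem is
  // the index of the last valid output row for this strip (0..7); the stores
  // walk from row rem back to row 0 by adding minus_output_stride (the
  // two's-complement negation of output_stride, so the addition steps
  // backwards).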
  do {
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    size_t bh = block_height;
    for (; bh >= 8; bh -= 8) {
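      // Load a full 8x8 tile: eight rows of eight 16-bit elements each.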
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);

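      // Transpose stage 1 of 3: interleave 16-bit elements of adjacent rows.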
      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

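      // Transpose stage 2 of 3: interleave 32-bit pairs.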
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

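      // Transpose stage 3 of 3: interleave 64-bit halves. v0_j then holds
      // column j of the loaded tile, i.e. row j of the transposed tile.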
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);


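      // Fall-through switch: store transposed rows rem..0, highest first,
      // stepping one output row back each time.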
      uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
      switch (rem) {
        case 7:
          _mm_storeu_si128((__m128i*) oN, v0_7);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 6:
          _mm_storeu_si128((__m128i*) oN, v0_6);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 5:
          _mm_storeu_si128((__m128i*) oN, v0_5);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 4:
          _mm_storeu_si128((__m128i*) oN, v0_4);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 3:
          _mm_storeu_si128((__m128i*) oN, v0_3);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 2:
          _mm_storeu_si128((__m128i*) oN, v0_2);
          oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
        case 1:
          _mm_storeu_si128((__m128i*) oN, v0_1);
        case 0:
          _mm_storeu_si128((__m128i*) o, v0_0);
          o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
          break;
        default:
          XNN_UNREACHABLE;
      }
    }
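    // Tail: transpose the remaining 1..7 rows. Row pointers that would run
    // past the block are clamped to the previous row, so the duplicated lanes
    // are computed but never stored.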
    if (bh != 0) {
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
      const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
      const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
      const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
      const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
      const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
      const __m128i v3_7 = _mm_undefined_si128();

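      // Same three interleave stages as the full-tile path. v3_7 is undefined,
      // but its lanes only reach element 7 of each v0_j, which is never stored
      // when fewer than 8 rows remain.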
      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);


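      // Store the tail in halving steps. bh & 4: write the low 4 elements of
      // each transposed row, then shuffle the high 64-bit half down.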
      if (bh & 4) {
        uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            _mm_storel_epi64((__m128i*) oN, v0_7);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 6:
            _mm_storel_epi64((__m128i*) oN, v0_6);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 5:
            _mm_storel_epi64((__m128i*) oN, v0_5);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 4:
            _mm_storel_epi64((__m128i*) oN, v0_4);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 3:
            _mm_storel_epi64((__m128i*) oN, v0_3);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 2:
            _mm_storel_epi64((__m128i*) oN, v0_2);
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 1:
            _mm_storel_epi64((__m128i*) oN, v0_1);
          case 0:
            _mm_storel_epi64((__m128i*) o, v0_0);
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 4;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
        v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
        v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
        v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
        v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
      }

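      // bh & 2: write 2 elements of each transposed row, then shift them out
      // of the low 64 bits.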
      if (bh & 2) {
        uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_7));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 6:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_6));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 5:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_5));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 4:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_4));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 3:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_3));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 2:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_2));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 1:
            unaligned_store_u32(oN, (uint32_t) _mm_cvtsi128_si32(v0_1));
          case 0:
            unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
            break;
          default:
            XNN_UNREACHABLE;
        }
        o += 2;
        v0_0 = _mm_srli_epi64(v0_0, 32);
        v0_1 = _mm_srli_epi64(v0_1, 32);
        v0_2 = _mm_srli_epi64(v0_2, 32);
        v0_3 = _mm_srli_epi64(v0_3, 32);
        v0_4 = _mm_srli_epi64(v0_4, 32);
        v0_5 = _mm_srli_epi64(v0_5, 32);
        v0_6 = _mm_srli_epi64(v0_6, 32);
        v0_7 = _mm_srli_epi64(v0_7, 32);
      }
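      // bh & 1: write the final element of each transposed row.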
      if (bh & 1) {
        uint16_t* oN = (uint16_t*) ((uintptr_t) o + oN_stride);
        switch (rem) {
          case 7:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_7));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 6:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_6));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 5:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_5));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 4:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_4));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 3:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_3));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 2:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_2));
            oN = (uint16_t*) ((uintptr_t) oN + minus_output_stride);
          case 1:
            unaligned_store_u16(oN, (uint16_t) _mm_cvtsi128_si32(v0_1));
          case 0:
            unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
            break;
          default:
            XNN_UNREACHABLE;
        }
      }
    }

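    // Advance to the next strip: 8 elements right in the input, 8 rows down
    // in the output.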
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
    o = (uint16_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}