1 // Auto-generated file. Do not edit!
2 // Template: src/x32-transposec/wasmsimd.c.in
3 // Generator: tools/xngen
4 //
5 // Copyright 2021 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9
10 #include <wasm_simd128.h>
11
12 #include <assert.h>
13
14 #include <xnnpack/common.h>
15 #include <xnnpack/math.h>
16 #include <xnnpack/transpose.h>
17
// Transposes a block of 16-bit elements in 8x8 tiles using WASM SIMD shuffles.
// "reuse" variant: a single input pointer (i0) is advanced row by row instead
// of maintaining 8 separate input row pointers.
// "multi" variant: 8 separate output pointers (o0..o7), one per transposed
// output row within the current 8-column stripe.
// XNN_OOB_READS: full 128-bit loads are issued even when fewer than 8 columns
// remain, so reads may run past the logical end of a row.
void xnn_x16_transposec_ukernel__8x8_reuse_multi_wasmsimd(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,    // bytes between consecutive input rows
    size_t output_stride,   // bytes between consecutive output rows
    size_t block_width,     // input columns == output rows
    size_t block_height) XNN_OOB_READS  // input rows == output columns
{
  assert(output_stride >= block_height * sizeof(uint16_t));
  assert(input_stride >= block_width * sizeof(uint16_t));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint16_t);
  const size_t tile_wbytes = tile_width * sizeof(uint16_t);
  // After a full column-stripe, rewind i0 back up over the rows consumed by
  // the main loop and step one tile to the right.
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  // Advance each output pointer to its row in the next 8-row output group,
  // undoing the element-wise advances made by the vector stores and the
  // bh&4 / bh&2 tail stores (the final odd element in the bh&1 case does not
  // advance the pointers, hence rounding down to a multiple of 2).
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);

  const uint16_t* i0 = input;
  // Eight output row pointers, one per column of the current input tile.
  uint16_t* o0 = (uint16_t*) output;
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
  uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
  uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
  uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
  uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
  uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
  uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);

  do {
    // When the stripe is narrower than 8 columns, redirect the pointers of
    // the nonexistent output rows to o0 so their stores land in valid memory
    // (o0's row) and are simply overwritten/ignored.
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 4) {
      o4 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 6) {
      o5 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 6) {
      o6 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 8) {
      o7 = o0;
    }
    size_t bh = block_height;
    // Main loop: transpose one full 8x8 tile per iteration.
    for (; bh >= 8; bh -= 8) {
      // Load 8 consecutive input rows (8 x 16-bit elements each).
      const v128_t v3_0 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_1 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_2 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_3 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_4 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_5 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_6 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_7 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);

      // 3-stage interleave network (log2(8) stages of 16-bit lane shuffles)
      // that turns the 8 row vectors v3_* into the 8 column vectors v0_*.
      const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);

      // Store the 8 transposed columns; each pointer advances one tile
      // (8 elements) along its output row.
      wasm_v128_store(o7, v0_7);
      o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
      wasm_v128_store(o6, v0_6);
      o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
      wasm_v128_store(o5, v0_5);
      o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
      wasm_v128_store(o4, v0_4);
      o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
      wasm_v128_store(o3, v0_3);
      o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
      wasm_v128_store(o2, v0_2);
      o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
      wasm_v128_store(o1, v0_1);
      o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
      wasm_v128_store(o0, v0_0);
      o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
    }

    // Remainder: 1..7 input rows left in this stripe.
    if (bh != 0) {
      const v128_t v3_0 = wasm_v128_load(i0);
      // Each subsequent row pointer is clamped back to the previous one when
      // past the last valid row, so clamped rows reload valid (duplicate)
      // data whose transposed elements are never stored.
      const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v3_1 = wasm_v128_load(i1);
      const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const v128_t v3_2 = wasm_v128_load(i2);
      const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const v128_t v3_3 = wasm_v128_load(i3);
      const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const v128_t v3_4 = wasm_v128_load(i4);
      const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const v128_t v3_5 = wasm_v128_load(i5);
      const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const v128_t v3_6 = wasm_v128_load(i6);
      // At most 7 rows remain, so the 8th input row is just zeros.
      const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);

      // Same 3-stage transpose network as the main loop.
      const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);

      // Non-const: the tail below shifts consumed elements out of these.
      v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);

      // Store bh elements per output row by binary decomposition of bh:
      // 4 elements (64 bits), then 2 (32 bits), then 1 (16 bits).
      if (bh & 4) {
        // f64 lane extract is used purely as a 64-bit bit-mover; no
        // floating-point interpretation of the data occurs.
        *((double*) o7) = wasm_f64x2_extract_lane(v0_7, 0);
        o7 += 4;
        *((double*) o6) = wasm_f64x2_extract_lane(v0_6, 0);
        o6 += 4;
        *((double*) o5) = wasm_f64x2_extract_lane(v0_5, 0);
        o5 += 4;
        *((double*) o4) = wasm_f64x2_extract_lane(v0_4, 0);
        o4 += 4;
        *((double*) o3) = wasm_f64x2_extract_lane(v0_3, 0);
        o3 += 4;
        *((double*) o2) = wasm_f64x2_extract_lane(v0_2, 0);
        o2 += 4;
        *((double*) o1) = wasm_f64x2_extract_lane(v0_1, 0);
        o1 += 4;
        *((double*) o0) = wasm_f64x2_extract_lane(v0_0, 0);
        o0 += 4;
        // Move the upper 4 elements into the lower half for the next step.
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
        v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
        v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
        v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
        v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
      }

      if (bh & 2) {
        // f32 lane extract moves 32 bits (2 x uint16_t) at a time.
        *((float*) o7) = wasm_f32x4_extract_lane(v0_7, 0);
        o7 += 2;
        *((float*) o6) = wasm_f32x4_extract_lane(v0_6, 0);
        o6 += 2;
        *((float*) o5) = wasm_f32x4_extract_lane(v0_5, 0);
        o5 += 2;
        *((float*) o4) = wasm_f32x4_extract_lane(v0_4, 0);
        o4 += 2;
        *((float*) o3) = wasm_f32x4_extract_lane(v0_3, 0);
        o3 += 2;
        *((float*) o2) = wasm_f32x4_extract_lane(v0_2, 0);
        o2 += 2;
        *((float*) o1) = wasm_f32x4_extract_lane(v0_1, 0);
        o1 += 2;
        *((float*) o0) = wasm_f32x4_extract_lane(v0_0, 0);
        o0 += 2;
        // Shift the consumed 2 elements out of the low 64-bit lane.
        v0_0 = wasm_u64x2_shr(v0_0, 32);
        v0_1 = wasm_u64x2_shr(v0_1, 32);
        v0_2 = wasm_u64x2_shr(v0_2, 32);
        v0_3 = wasm_u64x2_shr(v0_3, 32);
        v0_4 = wasm_u64x2_shr(v0_4, 32);
        v0_5 = wasm_u64x2_shr(v0_5, 32);
        v0_6 = wasm_u64x2_shr(v0_6, 32);
        v0_7 = wasm_u64x2_shr(v0_7, 32);
      }
      if (bh & 1) {
        // Final odd element; pointers intentionally not advanced here
        // (output_reset above accounts for this).
        *o7 = wasm_i16x8_extract_lane(v0_7, 0);
        *o6 = wasm_i16x8_extract_lane(v0_6, 0);
        *o5 = wasm_i16x8_extract_lane(v0_5, 0);
        *o4 = wasm_i16x8_extract_lane(v0_4, 0);
        *o3 = wasm_i16x8_extract_lane(v0_3, 0);
        *o2 = wasm_i16x8_extract_lane(v0_2, 0);
        *o1 = wasm_i16x8_extract_lane(v0_1, 0);
      }
    }

    // Advance to the next 8-column stripe of the input / 8-row group of the
    // output; loop until all columns are transposed.
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
    o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
    o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
    o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
    o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
    // doz() = "difference or zero": saturating subtraction of tile_width.
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}
267