/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#ifdef __ARM_FEATURE_SVE


namespace {

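// Byte-wise "1x4" transpose-interleave used by the SVE GEMM back end: groups
// of four 8-bit input rows are zipped together so that each group of four
// rows yields four vector-lengths (4VL) of contiguous output per block of
// columns. The main loop processes eight rows at a time; the tail loop
// processes the remaining one to four rows, reading a zero-filled pad row in
// place of rows beyond 'height'.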
void sve_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));

    if (height % 4) {
        memset(pad_row, 0, width * sizeof(uint8_t));
    }

    size_t out_stride = 4 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
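    // Each block of columns advances the output by roundup(height, 4) * VL
    // bytes: every (possibly padded) group of four rows emits 4VL bytes of
    // interleaved data, and get_vector_length<uint32_t>() is the vector
    // length in 32-bit elements, i.e. one quarter of the VL in bytes.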

    __asm__ __volatile__(
      "ptrue p1.b\n"
      "cmp %x[height], #0x8\n"
      "blt 6f\n"
      "1:"  // Main row loop: Head
      "mov x9, %x[in]\n"
      "mov x28, %x[out]\n"
      "add x27, x9, %x[in_stride]\n"
      "add x26, x27, %x[in_stride]\n"
      "add x25, x26, %x[in_stride]\n"
      "add x24, x25, %x[in_stride]\n"
      "add x23, x24, %x[in_stride]\n"
      "add x22, x23, %x[in_stride]\n"
      "add x21, x22, %x[in_stride]\n"
      "add %x[in], x21, %x[in_stride]\n"
      "sub %x[height], %x[height], #0x8\n"
      "mov x20, %x[width]\n"
      "cntb x19, ALL, MUL #2\n"
      "cmp x20, x19\n"
      "blt 3f\n"
      "2:"  // Main row loop: Unroll column loop
      "ld1b { z17.b }, p1/Z, [x9]\n"
      "sub x20, x20, x19\n"
      "ld1b { z3.b }, p1/Z, [x9, #1, MUL VL]\n"
      "addvl x9, x9, #2\n"
      "ld1b { z20.b }, p1/Z, [x27]\n"
      "cmp x20, x19\n"
      "ld1b { z2.b }, p1/Z, [x27, #1, MUL VL]\n"
      "addvl x27, x27, #2\n"
      "ld1b { z16.b }, p1/Z, [x26]\n"
      "zip1 z18.b, z17.b, z16.b\n"
      "ld1b { z1.b }, p1/Z, [x26, #1, MUL VL]\n"
      "addvl x26, x26, #2\n"
      "zip2 z19.b, z17.b, z16.b\n"
      "ld1b { z17.b }, p1/Z, [x25]\n"
      "ld1b { z0.b }, p1/Z, [x25, #1, MUL VL]\n"
      "zip1 z31.b, z3.b, z1.b\n"
      "ld1b { z30.b }, p1/Z, [x24]\n"
      "addvl x25, x25, #2\n"
      "zip1 z16.b, z20.b, z17.b\n"
      "ld1b { z29.b }, p1/Z, [x24, #1, MUL VL]\n"
      "addvl x24, x24, #2\n"
      "zip1 z28.b, z18.b, z16.b\n"
      "ld1b { z27.b }, p1/Z, [x23]\n"
      "zip2 z26.b, z18.b, z16.b\n"
      "ld1b { z25.b }, p1/Z, [x23, #1, MUL VL]\n"
      "addvl x23, x23, #2\n"
      "zip2 z18.b, z20.b, z17.b\n"
      "ld1b { z16.b }, p1/Z, [x22]\n"
      "zip1 z24.b, z2.b, z0.b\n"
      "ld1b { z23.b }, p1/Z, [x22, #1, MUL VL]\n"
      "addvl x22, x22, #2\n"
      "zip1 z17.b, z19.b, z18.b\n"
      "ld1b { z22.b }, p1/Z, [x21]\n"
      "zip2 z21.b, z19.b, z18.b\n"
      "ld1b { z20.b }, p1/Z, [x21, #1, MUL VL]\n"
      "addvl x21, x21, #2\n"
      "zip1 z19.b, z30.b, z16.b\n"
      "st1b { z28.b }, p1, [x28]\n"
      "zip2 z18.b, z30.b, z16.b\n"
      "st1b { z26.b }, p1, [x28, #1, MUL VL]\n"
      "zip1 z16.b, z27.b, z22.b\n"
      "st1b { z17.b }, p1, [x28, #2, MUL VL]\n"
      "zip1 z17.b, z19.b, z16.b\n"
      "st1b { z21.b }, p1, [x28, #3, MUL VL]\n"
      "zip2 z16.b, z19.b, z16.b\n"
      "st1b { z17.b }, p1, [x28, #4, MUL VL]\n"
      "zip2 z17.b, z27.b, z22.b\n"
      "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "zip1 z16.b, z31.b, z24.b\n"
      "st1b { z16.b }, p1, [x28]\n"
      "zip2 z16.b, z31.b, z24.b\n"
      "zip2 z18.b, z3.b, z1.b\n"
      "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
      "zip2 z17.b, z2.b, z0.b\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
      "zip1 z18.b, z29.b, z23.b\n"
      "zip1 z17.b, z25.b, z20.b\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
      "zip2 z18.b, z29.b, z23.b\n"
      "zip2 z17.b, z25.b, z20.b\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "bge 2b\n"
      "3:"  // Main row loop: Unroll column loop skip
      "cbz x20, 5f\n"
      "4:"  // Main row loop: Column loop
      "whilelt p0.b, XZR, x20\n"
      "ld1b { z17.b }, p0/Z, [x9]\n"
      "addvl x9, x9, #1\n"
      "ld1b { z25.b }, p0/Z, [x27]\n"
      "addvl x27, x27, #1\n"
      "ld1b { z16.b }, p0/Z, [x26]\n"
      "zip1 z18.b, z17.b, z16.b\n"
      "ld1b { z24.b }, p0/Z, [x25]\n"
      "addvl x26, x26, #1\n"
      "zip2 z23.b, z17.b, z16.b\n"
      "ld1b { z22.b }, p0/Z, [x24]\n"
      "addvl x25, x25, #1\n"
      "zip1 z16.b, z25.b, z24.b\n"
      "ld1b { z21.b }, p0/Z, [x23]\n"
      "addvl x24, x24, #1\n"
      "zip1 z17.b, z18.b, z16.b\n"
      "ld1b { z20.b }, p0/Z, [x22]\n"
      "addvl x23, x23, #1\n"
      "zip2 z18.b, z18.b, z16.b\n"
      "ld1b { z19.b }, p0/Z, [x21]\n"
      "addvl x22, x22, #1\n"
      "zip2 z16.b, z25.b, z24.b\n"
      "st1b { z17.b }, p1, [x28]\n"
      "addvl x21, x21, #1\n"
      "zip1 z17.b, z23.b, z16.b\n"
      "st1b { z18.b }, p1, [x28, #1, MUL VL]\n"
      "decw x20, ALL, MUL #4\n"
      "zip2 z16.b, z23.b, z16.b\n"
      "st1b { z17.b }, p1, [x28, #2, MUL VL]\n"
      "cmp x20, #0x0\n"
      "zip1 z18.b, z22.b, z20.b\n"
      "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
      "zip1 z17.b, z21.b, z19.b\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #4, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #5, MUL VL]\n"
      "zip2 z18.b, z22.b, z20.b\n"
      "zip2 z17.b, z21.b, z19.b\n"
      "zip1 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #6, MUL VL]\n"
      "zip2 z16.b, z18.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #7, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "bgt 4b\n"
      "5:"  // Main row loop: Column loop skip
      "addvl %x[out], %x[out], #8\n"
      "cmp %x[height], #0x8\n"
      "bge 1b\n"
      "cbz %x[height], 12f\n"
      "6:"  // Main loop skip

      "7:"  // Tail row loop: Head
      "mov x9, %x[in]\n"
      "mov x28, %x[out]\n"
      "add x27, x9, %x[in_stride]\n"
      "add x26, x27, %x[in_stride]\n"
      "add x25, x26, %x[in_stride]\n"
      "add %x[in], x25, %x[in_stride]\n"
      "cmp %x[height], #0x3\n"
      "csel x25, x25, %x[pad_row], GT\n"
      "csel x26, x26, %x[pad_row], GE\n"
      "cmp %x[height], #0x1\n"
      "csel x27, x27, %x[pad_row], GT\n"
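      // The csel instructions above redirect rows 1-3 to the zero-filled
      // pad_row when they lie beyond 'height', so the zips below always read
      // four rows.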
      "sub %x[height], %x[height], #0x4\n"
      "mov x20, %x[width]\n"
      "cntb x19, ALL, MUL #2\n"
      "cmp x20, x19\n"
      "blt 9f\n"
      "8:"  // Tail row loop: Unroll column loop
      "ld1b { z18.b }, p1/Z, [x9]\n"
      "sub x20, x20, x19\n"
      "ld1b { z19.b }, p1/Z, [x9, #1, MUL VL]\n"
      "addvl x9, x9, #2\n"
      "ld1b { z25.b }, p1/Z, [x27]\n"
      "cmp x20, x19\n"
      "ld1b { z24.b }, p1/Z, [x27, #1, MUL VL]\n"
      "addvl x27, x27, #2\n"
      "ld1b { z17.b }, p1/Z, [x26]\n"
      "zip1 z23.b, z18.b, z17.b\n"
      "ld1b { z16.b }, p1/Z, [x26, #1, MUL VL]\n"
      "addvl x26, x26, #2\n"
      "zip2 z22.b, z18.b, z17.b\n"
      "ld1b { z18.b }, p1/Z, [x25]\n"
      "ld1b { z21.b }, p1/Z, [x25, #1, MUL VL]\n"
      "zip1 z20.b, z19.b, z16.b\n"
      "addvl x25, x25, #2\n"
      "zip2 z19.b, z19.b, z16.b\n"
      "zip1 z17.b, z25.b, z18.b\n"
      "zip1 z16.b, z23.b, z17.b\n"
      "st1b { z16.b }, p1, [x28]\n"
      "zip2 z16.b, z23.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
      "zip2 z17.b, z25.b, z18.b\n"
      "zip1 z16.b, z22.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
      "zip2 z16.b, z22.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "zip1 z18.b, z24.b, z21.b\n"
      "zip2 z17.b, z24.b, z21.b\n"
      "zip1 z16.b, z20.b, z18.b\n"
      "st1b { z16.b }, p1, [x28]\n"
      "zip2 z16.b, z20.b, z18.b\n"
      "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
      "zip1 z16.b, z19.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
      "zip2 z16.b, z19.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "bge 8b\n"
      "9:"  // Tail row loop: Unroll column loop skip
      "cbz x20, 11f\n"
      "10:"  // Tail row loop: Column loop
      "whilelt p0.b, XZR, x20\n"
      "ld1b { z18.b }, p0/Z, [x9]\n"
      "addvl x9, x9, #1\n"
      "ld1b { z21.b }, p0/Z, [x27]\n"
      "addvl x27, x27, #1\n"
      "ld1b { z17.b }, p0/Z, [x26]\n"
      "zip1 z20.b, z18.b, z17.b\n"
      "ld1b { z16.b }, p0/Z, [x25]\n"
      "addvl x26, x26, #1\n"
      "zip2 z19.b, z18.b, z17.b\n"
      "addvl x25, x25, #1\n"
      "decw x20, ALL, MUL #4\n"
      "zip1 z18.b, z21.b, z16.b\n"
      "cmp x20, #0x0\n"
      "zip2 z17.b, z21.b, z16.b\n"
      "zip1 z16.b, z20.b, z18.b\n"
      "st1b { z16.b }, p1, [x28]\n"
      "zip2 z16.b, z20.b, z18.b\n"
      "st1b { z16.b }, p1, [x28, #1, MUL VL]\n"
      "zip1 z16.b, z19.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #2, MUL VL]\n"
      "zip2 z16.b, z19.b, z17.b\n"
      "st1b { z16.b }, p1, [x28, #3, MUL VL]\n"
      "add x28, x28, %x[out_stride]\n"
      "bgt 10b\n"
      "11:"  // Tail row loop: Column loop skip
      "addvl %x[out], %x[out], #4\n"
      "cmp %x[height], #0x1\n"
      "bge 7b\n"
      "12:"  // Done

      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
      : "cc", "memory", "p0", "p1", "x9", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
    );
}

} // anonymous namespace

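// Transform<4, 4, true, VLType::SVE> specialisations dispatch to the byte-wise
// kernel above. The int8_t variant reuses the uint8_t kernel via
// reinterpret_cast, since transposing bytes does not depend on signedness.
// Illustrative call only (pointer and leading-dimension names are
// hypothetical): a caller packing rows k0..kmax of columns x0..xmax would
// invoke
//   Transform<4, 4, true, VLType::SVE>(out_ptr, in_ptr, ldin, x0, xmax, k0, kmax);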
template<>
void Transform<4, 4, true, VLType::SVE>(
    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    sve_transpose_interleave_4VL_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(uint8_t) / 1,
        stride * sizeof(uint8_t),
        (kmax-k0)
    );
}

template<>
void Transform<4, 4, true, VLType::SVE>(
    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    sve_transpose_interleave_4VL_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(int8_t) / 1,
        stride * sizeof(int8_t),
        (kmax-k0)
    );
}

#endif