// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/zip.h>


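// Interleave m groups of n bytes each (32-bit elements): element i of group j
// ends up at output[i * m + j]. n must be a non-zero multiple of 4 and m >= 4.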
void xnn_x32_zip_xm_ukernel__neon(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);
  assert(m >= 4);

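  // Byte offsets for the manual pointer arithmetic below:
  // group_increment steps one interleaved output row (m 32-bit elements),
  // input_increment skips ahead three input groups of n bytes each, and
  // output_increment rewinds from the end of a 4-column block back to the
  // first row, 4 elements (16 bytes) to the right.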
  const uint32_t* w = input;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 16 - m * n;
  const uint32_t* last_input = (const uint32_t*) ((uintptr_t) input + n * (m - 1));
  uint32_t* last_output = (uint32_t*) ((uintptr_t) output + (m * 4 - 16));

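  // Process the m input groups four at a time; x, y, z and w point at four
  // consecutive groups, with w clamped to the last group so the final block
  // re-reads overlapping data when m is not a multiple of 4.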
  for (size_t i = 0; i < m; i += 4) {
    w = (const uint32_t*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const uint32_t* z = (const uint32_t*) ((uintptr_t) w - n);
    const uint32_t* y = (const uint32_t*) ((uintptr_t) z - n);
    const uint32_t* x = (const uint32_t*) ((uintptr_t) y - n);

    size_t k = n;
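    // Main loop: consume 16 bytes (4 elements) from each of the 4 groups per
    // iteration and emit 4 interleaved output rows.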
    while (k >= 16) {
      const uint32x4_t vx = vld1q_u32(x); x += 4;
      const uint32x4_t vy = vld1q_u32(y); y += 4;
      const uint32x4_t vz = vld1q_u32(z); z += 4;
      const uint32x4_t vw = vld1q_u32(w); w += 4;

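      // Interleave x with y and z with w; each 64-bit half of the results
      // holds the two values destined for one output row.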
      const uint32x4x2_t vxy = vzipq_u32(vx, vy);
      const uint32x4x2_t vzw = vzipq_u32(vz, vw);

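      // Store the 4 rows, advancing by one full output row (m elements) each time.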
      vst1_u32(output, vget_low_u32(vxy.val[0]));
      vst1_u32(output + 2, vget_low_u32(vzw.val[0]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      vst1_u32(output, vget_high_u32(vxy.val[0]));
      vst1_u32(output + 2, vget_high_u32(vzw.val[0]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      vst1_u32(output, vget_low_u32(vxy.val[1]));
      vst1_u32(output + 2, vget_low_u32(vzw.val[1]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      vst1_u32(output, vget_high_u32(vxy.val[1]));
      vst1_u32(output + 2, vget_high_u32(vzw.val[1]));
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      k -= 16;
    }
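    // Handle the remaining 1-3 elements per group (k is a byte count).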
    if XNN_UNLIKELY(k != 0) {
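      // 8 bytes left: zip and store two more elements from each group.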
      if (k & 8) {
        const uint32x2_t vx = vld1_u32(x); x += 2;
        const uint32x2_t vy = vld1_u32(y); y += 2;
        const uint32x2_t vz = vld1_u32(z); z += 2;
        const uint32x2_t vw = vld1_u32(w); w += 2;

        const uint32x2x2_t vxy = vzip_u32(vx, vy);
        const uint32x2x2_t vzw = vzip_u32(vz, vw);

        vst1_u32(output, vxy.val[0]);
        vst1_u32(output + 2, vzw.val[0]);
        output = (uint32_t*) ((uintptr_t) output + group_increment);

        vst1_u32(output, vxy.val[1]);
        vst1_u32(output + 2, vzw.val[1]);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
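      // 4 bytes left: gather the final element of each group via lane loads.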
      if (k & 4) {
        const uint32x2_t vx = vld1_dup_u32(x);
        const uint32x2_t vz = vld1_dup_u32(z);
        const uint32x2_t vxy = vld1_lane_u32(y, vx, 1);
        const uint32x2_t vzw = vld1_lane_u32(w, vz, 1); w += 1;

        vst1_u32(output, vxy);
        vst1_u32(output + 2, vzw);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
    }
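    // Advance to the next block of 4 output columns; the clamp makes the final
    // block target the last 4 columns, overlapping earlier writes when m is
    // not a multiple of 4.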
    output = (uint32_t*) ((uintptr_t) output + output_increment);
    if (output > last_output) {
      output = last_output;
    }
  }
}