// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION == "RNDNU"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_gemm_minmax_rndnu_ukernel_${MR}x${NR}c16__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

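  // Round KC up to a multiple of 16 bytes: the inner loop below consumes 16 K values per
  // iteration, and the packed weights are expected to be padded so the extra elements do not
  // affect the result.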
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
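  // Set up the remaining per-row A and C pointers; rows beyond `mr` alias the previous row so
  // the kernel can unconditionally process ${MR} rows.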
  $for M in range(1, MR):
    const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride);
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
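    // Initialize accumulators with the per-channel bias values stored at the start of the packed weights.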
    $for N in range(NR):
      int32x4_t vacc0x${N} = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(NR):
        int32x4_t vacc${M}x${N} = vacc0x${N};

    // KC loop of 16
    size_t k = kc;
    while (k != 0) {
      $for M in range(MR):
        const int8x16_t va${M} = vld1q_s8(a${M}); a${M} += 16;

      $for N in range(NR):
        const int8x16_t vb${N} = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

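      // Multiply the low 8 bytes of each A/B pair (vmull_s8), accumulate the high 8 bytes
      // (vmlal_s8), then widen and pairwise-accumulate the int16 products into the int32
      // accumulators (vpadalq_s16).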
      $for N in range(NR):
        $for M in range(MR):
          int16x8_t vprod${M}x${N} = vmull_s8(vget_low_s8(vb${N}), vget_low_s8(va${M}));
        $for M in range(MR):
          vprod${M}x${N} = vmlal_s8(vprod${M}x${N}, vget_high_s8(vb${N}), vget_high_s8(va${M}));
        $for M in range(MR):
          vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});

      k -= 16 * sizeof(int8_t);
    }

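    // Horizontally reduce the 4 partial sums held in each per-element accumulator to a single
    // int32, producing one int32x4 per group of 4 output channels.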
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x4_t vsum${M}x${ABC[N:N+2]} = vpaddq_s32(vacc${M}x${N}, vacc${M}x${N+1});
        const int32x4_t vsum${M}x${ABC[N+2:N+4]} = vpaddq_s32(vacc${M}x${N+2}, vacc${M}x${N+3});
    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vpsum${M}x${ABC[N]} = vadd_s32(vget_low_s32(vacc${M}x${N}), vget_high_s32(vacc${M}x${N}));
        const int32x2_t vpsum${M}x${ABC[N+1]} = vadd_s32(vget_low_s32(vacc${M}x${N+1}), vget_high_s32(vacc${M}x${N+1}));
        const int32x2_t vpsum${M}x${ABC[N+2]} = vadd_s32(vget_low_s32(vacc${M}x${N+2}), vget_high_s32(vacc${M}x${N+2}));
        const int32x2_t vpsum${M}x${ABC[N+3]} = vadd_s32(vget_low_s32(vacc${M}x${N+3}), vget_high_s32(vacc${M}x${N+3}));
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vpsum${M}x${ABC[N]}, vpsum${M}x${ABC[N+1]});
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vpsum${M}x${ABC[N+2]}, vpsum${M}x${ABC[N+3]});
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

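    // RNDNU requantization: saturating pre-shift, saturating doubling multiply-high by the
    // fixed-point multiplier, then rounding post-shift.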
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

    $for M in range(MR):
      $for N in range(0, NR, 4):
        vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);

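    // Add the output zero point and narrow with saturation: int32 -> int16 -> int8.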
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif
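    // Clamp the results to the [output_min, output_max] range.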
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
      const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
      const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

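    // Main store path: all ${NR} output columns fit in this row of C.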
    if (nc >= ${NR}) {
      $for M in range(MR):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in range(MR):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

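      // Rewind the A pointers to the start of each row for the next tile of output columns.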
      $for M in range(MR):
        a${M} = (const int8_t*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Final case where not all of the ${NR} columns fit in the destination.
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in range(MR):
            $if M % 2 == 1:
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in range(MR):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
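      // Store the remaining 4/2/1 columns, shifting the already-stored lanes out with vext after
      // each partial store.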
      if (nc & 4) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}