// Auto-generated file. Do not edit!
//   Template: src/s16-window/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/window.h>

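// Window kernel: for each of the `rows` input rows, multiplies every int16_t
// element by the matching weight and writes the 32-bit product shifted right
// by 12 with saturation back to int16_t:
//   output[i] = saturate_to_int16((input[i] * weights[i]) >> 12)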
void xnn_s16_window_shift12_ukernel__neon_x16(
    size_t rows,
    size_t batch_size,
    const int16_t* input,
    const int16_t* weights,
    int16_t* output,
    uint32_t shift)
{
  assert(rows != 0);
  assert(batch_size != 0);
  assert(input != NULL);
  assert(weights != NULL);
  assert(output != NULL);
  assert(shift == 12);

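  // The shift amount is fixed at 12 because vqshrn_n_s32 below takes a
  // compile-time immediate; the runtime `shift` argument is only validated by
  // the assert above.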
  do {
    const int16_t* w = weights;
    size_t n = batch_size * sizeof(int16_t);
    for (; n >= 16 * sizeof(int16_t); n -= 16 * sizeof(int16_t)) {
      const int16x8_t vi0 = vld1q_s16(input); input += 8;
      const int16x8_t vi1 = vld1q_s16(input); input += 8;

      const int16x8_t vw0 = vld1q_s16(w); w += 8;
      const int16x8_t vw1 = vld1q_s16(w); w += 8;

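      // Widening multiplies: each int16_t*int16_t product is kept as a 32-bit
      // accumulator, so it cannot wrap before the shift.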
      int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
      int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
      int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
      int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));

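      // Shift each 32-bit product right by 12 and narrow back to int16_t with
      // saturation.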
      const int16x4_t vshift0_lo = vqshrn_n_s32(vacc0_lo, 12);
      const int16x4_t vshift0_hi = vqshrn_n_s32(vacc0_hi, 12);
      const int16x4_t vshift1_lo = vqshrn_n_s32(vacc1_lo, 12);
      const int16x4_t vshift1_hi = vqshrn_n_s32(vacc1_hi, 12);

      const int16x8_t vout0 = vcombine_s16(vshift0_lo, vshift0_hi);
      const int16x8_t vout1 = vcombine_s16(vshift1_lo, vshift1_hi);

      vst1q_s16(output, vout0); output += 8;
      vst1q_s16(output, vout1); output += 8;
    }

    // Remainder of full vectors
    for (; n >= 8 * sizeof(int16_t); n -= 8 * sizeof(int16_t)) {
      const int16x8_t vi = vld1q_s16(input); input += 8;
      const int16x8_t vw = vld1q_s16(w); w += 8;
      int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
      const int16x4_t vshift_lo = vqshrn_n_s32(vacc_lo, 12);
      const int16x4_t vshift_hi = vqshrn_n_s32(vacc_hi, 12);
      const int16x8_t vout = vcombine_s16(vshift_lo, vshift_hi);
      vst1q_s16(output, vout); output += 8;
    }

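    // n is a byte count, so with int16_t elements it is always even here.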
    assert(n % 2 == 0);
    // Remainder of 1 to 7 elements
    if XNN_UNLIKELY(n != 0) {
      const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + n);
      const int16x8_t vw = vld1q_s16(w);
      int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      int16x4_t vout = vqshrn_n_s32(vacc, 12);
      if (n & (4 * sizeof(int16_t))) {
        vst1_s16(output, vout); output += 4;
        vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
        vout = vqshrn_n_s32(vacc, 12);
      }
      if (n & (2 * sizeof(int16_t))) {
        vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
        vout = vext_s16(vout, vout, 2);
      }
      if (n & (1 * sizeof(int16_t))) {
        vst1_lane_s16(output, vout, 0); output += 1;
      }
    }
  } while (--rows != 0);
}