// Auto-generated file. Do not edit!
//   Template: src/f32-vmulcaddc/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>

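// This microkernel computes output[r][c] = clamp(input[r][c] * scale[c] + bias[c],
// params->scalar.min, params->scalar.max), processing two rows per pass ("2x")
// and eight channels per main-loop iteration ("c8"). The weights buffer is
// packed per 8-channel tile as 8 scale values followed by 8 bias values.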
void xnn_f32_vmulcaddc_minmax_ukernel_c8__neon_2x(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);  // channels is given in bytes

  // Pointers for the two rows processed per pass; strides are in bytes.
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);

  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  // Broadcast the clamping bounds into all four lanes.
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    // If only one row remains, alias row 1 onto row 0 so it is computed
    // (and stored) twice instead of reading past the end of the input.
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }

    const float* w = weights;
    size_t c = channels;
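    // Main loop: two rows x 8 channels per iteration. Within each weight
    // tile, the 8 scales are read first, then the 8 biases that follow them.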
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;
      const float32x4_t vscale4567 = vld1q_f32(w); w += 4;

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc0x4567 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;
      float32x4_t vacc1x4567 = vld1q_f32(i1); i1 += 4;

      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc0x4567 = vmulq_f32(vacc0x4567, vscale4567);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);
      vacc1x4567 = vmulq_f32(vacc1x4567, vscale4567);

      const float32x4_t vbias0123 = vld1q_f32(w); w += 4;
      const float32x4_t vbias4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc0x4567 = vaddq_f32(vacc0x4567, vbias4567);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);
      vacc1x4567 = vaddq_f32(vacc1x4567, vbias4567);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc0x4567 = vminq_f32(vacc0x4567, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vacc1x4567 = vminq_f32(vacc1x4567, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o0, vacc0x4567); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
      vst1q_f32(o1, vacc1x4567); o1 += 4;
    }
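    // Subtile loop: 4 remaining channels. Only the scale pointer advances;
    // the matching biases sit 8 floats after the tile start, i.e. at w + 4
    // once w has moved past the 4 scales just read.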
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vscale0123 = vld1q_f32(w); w += 4;

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 += 4;
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 += 4;

      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);

      const float32x4_t vbias0123 = vld1q_f32(w + 4);

      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
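    // Tail: 1-3 remaining channels. Full 128-bit loads may read past the
    // valid data; the XNN_OOB_READS annotation on the declaration documents
    // that this overread is intentional. The biases always sit 8 floats
    // after the corresponding scales, hence w + 8. Stores are narrowed to
    // two lanes and then one lane so no out-of-bounds write occurs.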
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vscale0123 = vld1q_f32(w);

      float32x4_t vacc0x0123 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + c);
      float32x4_t vacc1x0123 = vld1q_f32(i1); i1 = (const float*) ((uintptr_t) i1 + c);

      vacc0x0123 = vmulq_f32(vacc0x0123, vscale0123);
      vacc1x0123 = vmulq_f32(vacc1x0123, vscale0123);

      const float32x4_t vbias0123 = vld1q_f32(w + 8);

      vacc0x0123 = vaddq_f32(vacc0x0123, vbias0123);
      vacc1x0123 = vaddq_f32(vacc1x0123, vbias0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
    // Skip over the row just processed by the paired pointer to reach the
    // next pair of rows.
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);  // saturating decrement: max(rows - 2, 0)
  } while (rows != 0);
}
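
// --- Illustrative usage (not part of the generated kernel) ---
// A minimal sketch of how this microkernel could be driven directly, assuming
// the packing described above (8 scales followed by 8 biases per tile) and
// byte-valued channel/stride arguments. In XNNPACK proper the weights are
// packed and the params union is initialized by library helpers; the direct
// field assignments below are a simplification for illustration. The
// XNN_VMULCADDC_EXAMPLE guard is hypothetical, so this block does not alter
// the translation unit unless explicitly enabled.
#ifdef XNN_VMULCADDC_EXAMPLE
#include <stdio.h>

int main(void) {
  float weights[16];  // one c8 tile: weights[0..7] = scales, weights[8..15] = biases
  float input[2][8];
  float output[2][8];
  for (size_t c = 0; c < 8; c++) {
    weights[c] = 2.0f;      // scale
    weights[8 + c] = 1.0f;  // bias
    input[0][c] = (float) c;
    input[1][c] = (float) c + 0.5f;
  }

  union xnn_f32_minmax_params params;
  params.scalar.min = 0.0f;  // clamp to [0, 6] (ReLU6-style bounds)
  params.scalar.max = 6.0f;

  xnn_f32_vmulcaddc_minmax_ukernel_c8__neon_2x(
      /*rows=*/2,
      /*channels=*/8 * sizeof(float),  // in bytes
      &input[0][0], /*input_stride=*/8 * sizeof(float),
      weights,
      &output[0][0], /*output_stride=*/8 * sizeof(float),
      &params);

  // Expect output[r][c] = min(max(2 * input[r][c] + 1, 0), 6).
  printf("output[0][3] = %.1f\n", output[0][3]);  // 2*3 + 1 = 7 -> clamped to 6.0
  return 0;
}
#endif  // XNN_VMULCADDC_EXAMPLE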