// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/neon-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/spmm.h>


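// SpMM micro-kernel: multiplies a sparse weight matrix by a dense input and
// clamps the result to [min, max]. The packed weights hold, per output
// channel, one initial value (used as the accumulator start, i.e. the bias)
// followed by that channel's non-zero weight values; nidx_nnzmap[] gives the
// non-zero count per output channel, and widx_dmap[] gives the byte deltas
// used to advance the input pointer between non-zeros. mc is the dense row
// extent in bytes (a multiple of sizeof(float)); this variant processes
// 4 rows per main-loop iteration and pipelines loads ahead of the arithmetic.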
void xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

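  // Broadcast the output clamping bounds once; every output vector is clamped
  // to [min, max] before it is stored.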
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
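  // After the per-channel loop below, the output pointer sits nc strides past
  // the block base; subtracting output_decrement moves it to the next 4-row
  // block of the first output channel.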
  size_t output_decrement = output_stride * nc - 4 * sizeof(float);
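  // Main loop: process 4 input rows (mc is measured in bytes) per iteration,
  // producing a 4x1 output tile for each of the nc output channels.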
  while XNN_LIKELY(mc >= 4 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
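    // Software pipelining: pre-load the first packed weight (the bias of the
    // first output channel), the first index delta, and the first 4 input
    // values so that loads overlap the multiply-accumulates in the inner loop.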
    float32x4_t vw = vld1q_dup_f32(w); w += 1;
    intptr_t diff = *dmap++;
    float32x4_t vi0123 = vld1q_f32(input);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
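      // The weight pre-loaded on the previous iteration is this channel's
      // initial accumulator (bias); immediately start loading the next weight.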
      float32x4_t vacc0123 = vw;
      vw = vld1q_dup_f32(w); w += 1;
      if XNN_LIKELY(nnz != 0) {
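        // For each non-zero weight: accumulate into the 4-row tile, advance
        // the input pointer by the byte delta from widx_dmap, and prefetch
        // upcoming input and weight data.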
        do {
          vacc0123 = vmlaq_f32(vacc0123, vi0123, vw);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          __builtin_prefetch(input + 16);
          diff = *dmap++;
          vw = vld1q_dup_f32(w); w += 1;
          __builtin_prefetch(w + 32);
          vi0123 = vld1q_f32(input);
        } while (--nnz != 0);
      }
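      // Clamp to [min, max], store the 4 outputs for this channel, and step to
      // the next output channel.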
      float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
      vout0123 = vmaxq_f32(vout0123, vmin);
      vst1q_f32(output, vout0123);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 4;
    mc -= 4 * sizeof(float);
  }
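  // Remainder: fewer than 4 rows left. Handle 2 rows and then 1 row with
  // 64-bit NEON; output_decrement is adjusted for the narrower stores.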
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi01 = vld1_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc01 = vmla_f32(vacc01, vi01, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
        vout01 = vmax_f32(vout01, vget_low_f32(vmin));
        vst1_f32(output, vout01);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
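    // Final tail: a single remaining row, accumulated in one lane of a 64-bit
    // register and written back with a one-lane store.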
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const float32x2_t vi0 = vld1_dup_f32(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            __builtin_prefetch(input + 16);
            const float32x2_t vb = vld1_dup_f32(w); w += 1;
            __builtin_prefetch(w + 32);
            vacc0 = vmla_f32(vacc0, vi0, vb);
          } while (--nnz != 0);
        }
        float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
        vout0 = vmax_f32(vout0, vget_low_f32(vmin));
        vst1_lane_f32(output, vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}
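// Illustrative invocation sketch (not part of the generated kernel). The
// packing of `weights`, `widx_dmap`, and `nidx_nnzmap` is produced by
// XNNPACK's sparse-weight packing; the variable names below are hypothetical.
//
//   union xnn_f32_minmax_params params;
//   params.scalar.min = 0.0f;   // e.g. ReLU lower bound
//   params.scalar.max = 6.0f;   // e.g. ReLU6 upper bound
//   xnn_f32_spmm_minmax_ukernel_4x1__neon_pipelined(
//       batch_size * sizeof(float),   // mc: row extent, in bytes
//       output_channels,              // nc
//       input, packed_weights, widx_dmap, nidx_nnzmap,
//       output, output_stride_bytes, &params);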