// Auto-generated file. Do not edit!
//   Template: src/f16-vmulcaddc/fma3.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>

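// This microkernel applies a per-channel affine transform with clamping:
// for each row r and channel c,
//   output[r][c] = clamp(input[r][c] * scale[c] + bias[c], min, max)
// on IEEE fp16 data, widening to fp32 for the multiply-add (F16C + FMA3)
// and narrowing back to fp16 on store. It processes 2 rows per pass and
// 16 channels per main-loop iteration, per the _c16__fma3_2x suffix.
//
// A minimal scalar sketch of the same computation, assuming hypothetical
// fp16_to_fp32()/fp32_to_fp16() conversion helpers (illustrative only,
// not part of this kernel; `channels` is a byte count):
//
//   for (size_t r = 0; r < rows; r++) {
//     const uint16_t* in = (const uint16_t*) ((uintptr_t) input + r * input_stride);
//     uint16_t* out = (uint16_t*) ((uintptr_t) output + r * output_stride);
//     for (size_t c = 0; c < channels / sizeof(uint16_t); c++) {
//       float v = fp16_to_fp32(in[c]) * fp16_to_fp32(scale[c]) + fp16_to_fp32(bias[c]);
//       out[c] = fp32_to_fp16(fminf(fmaxf(v, min), max));
//     }
//   }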
void xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x(
    size_t rows,
    size_t channels,
    const void*restrict input,
    size_t input_stride,
    const void*restrict weights,
    void*restrict output,
    size_t output_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);

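  // Set up pointers for two rows of input and output; row 1 trails row 0 by
  // one stride.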
  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);

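  // By the end of a pass, the channel loops have already advanced each
  // pointer by `channels` bytes, so stepping to the next pair of rows only
  // requires 2*stride - channels.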
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

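  // The clamping bounds are stored pre-broadcast as 8 floats each in the
  // params struct; the aligned _mm256_load_ps implies they are 32-byte
  // aligned there.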
  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
  do {
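    // If only one row remains, alias row 1 onto row 0 so both sets of loads
    // and stores hit the same valid row; the duplicated work is harmless.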
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
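    // Main loop: 16 channels per iteration. The packed weights appear to be
    // laid out in 16-channel tiles of [16 scales][16 biases], which is why
    // scales are read at w..w+15 and biases at w+16..w+31 before w += 32.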
    for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
      const __m256 vscale01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      const __m256 vscale89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));

      __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      __m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
      i0 += 16;
      __m256 vacc1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      __m256 vacc1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
      i1 += 16;

      const __m256 vbias01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      const __m256 vbias89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 24)));
      w += 32;

      vacc0x01234567 = _mm256_fmadd_ps(vacc0x01234567, vscale01234567, vbias01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(vacc0x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);
      vacc1x01234567 = _mm256_fmadd_ps(vacc1x01234567, vscale01234567, vbias01234567);
      vacc1x89ABCDEF = _mm256_fmadd_ps(vacc1x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);

      vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
      vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
      vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
      vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);

      vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
      vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
      vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
      vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_NO_EXC));
      o0 += 16;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_NO_EXC));
      o1 += 16;
    }
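    // Remainder loop: 8 channels at a time. Note that w advances by only 8
    // (staying inside the scale half of the current 16-channel tile) while
    // the bias is still read at the fixed +16 offset into the same tile.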
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;
      __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      w += 8;

      vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
      vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);

      vacc0 = _mm256_max_ps(vacc0, vmin);
      vacc1 = _mm256_max_ps(vacc1, vmin);

      vacc0 = _mm256_min_ps(vacc0, vmax);
      vacc1 = _mm256_min_ps(vacc1, vmax);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0, _MM_FROUND_NO_EXC));
      o0 += 8;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1, _MM_FROUND_NO_EXC));
      o1 += 8;
    }
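    // Tail: 1-7 remaining channels. Full 8-lane vectors are still loaded
    // (the XNN_OOB_READS annotation on the kernel marks these deliberate
    // out-of-bounds reads), but only `c` bytes are stored below.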
    if XNN_UNLIKELY(c != 0) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 = (const uint16_t*) ((uintptr_t) i0 + c);
      __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 = (const uint16_t*) ((uintptr_t) i1 + c);

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));

      vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
      vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);

      vacc0 = _mm256_max_ps(vacc0, vmin);
      vacc1 = _mm256_max_ps(vacc1, vmin);

      vacc0 = _mm256_min_ps(vacc0, vmax);
      vacc1 = _mm256_min_ps(vacc1, vmax);

      __m128i vh0 = _mm256_cvtps_ph(vacc0, _MM_FROUND_NO_EXC);
      __m128i vh1 = _mm256_cvtps_ph(vacc1, _MM_FROUND_NO_EXC);

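      // Store the converted fp16 lanes piecewise: 4, then 2, then 1 lane,
      // moving the not-yet-stored lanes down to the bottom of the register
      // after each partial store.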
      if (c & (4 * sizeof(uint16_t))) {
        _mm_storel_epi64((__m128i*) o0, vh0);
        _mm_storel_epi64((__m128i*) o1, vh1);

        vh0 = _mm_unpackhi_epi64(vh0, vh0);
        vh1 = _mm_unpackhi_epi64(vh1, vh1);

        o0 += 4;
        o1 += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        _mm_storeu_si32(o0, vh0);
        _mm_storeu_si32(o1, vh1);

        vh0 = _mm_srli_epi64(vh0, 32);
        vh1 = _mm_srli_epi64(vh1, 32);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        *o0 = (uint16_t) _mm_extract_epi16(vh0, 0);
        *o1 = (uint16_t) _mm_extract_epi16(vh1, 0);

        o0 += 1;
        o1 += 1;
      }
    }
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
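    // doz() is a saturating "difference or zero" (max(rows - 2, 0)), so the
    // loop also terminates cleanly when the final pass handled a single row.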
    rows = doz(rows, 2);
  } while (rows != 0);
}