// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv/up-fma3.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_f16_dwconv_minmax_ukernel_up16x3__fma3_acc2(
    size_t channels,
    size_t output_width,
    const void** input,
    const void* weights,
    void* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
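  // Note on arithmetic: FMA3 operates on fp32 lanes, so every partial result is
  // computed in fp32 and immediately rounded back to fp16 via the
  // _mm256_cvtps_ph/_mm256_cvtph_ps round-trip, presumably so the output matches
  // native fp16 accumulation.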
  assert(channels != 0);
  assert(output_width != 0);

  const __m256 vmax = _mm256_load_ps(params->avx.max);
  const __m256 vmin = _mm256_load_ps(params->avx.min);

  uint16_t* o = (uint16_t*) output;
  do {
    const uint16_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint16_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint16_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
    }
    input = (const void**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const uint16_t* w = weights;
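    // Main loop: process 16 channels per iteration. The packed weights for each
    // 16-channel tile appear to be laid out as bias[16], k0[16], k1[16], k2[16]
    // (hence the loads at w, w+16, w+32, w+48 and the w += 64 advance). Two
    // accumulator chains (p0 and p1, the "acc2" in the kernel name) are kept per
    // 8-channel group and summed before clamping.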
    for (; c >= 16; c -= 16) {
      __m256 vacc01234567p0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
      __m256 vacc89ABCDEFp0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 8)));


      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      const __m256 vi0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
      i0 += 16;

      const __m256 vk0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      const __m256 vk0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 24)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));
      vacc89ABCDEFp0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0), _MM_FROUND_NO_EXC));

      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      const __m256 vi1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
      i1 += 16;

      const __m256 vk1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 32)));
      const __m256 vk1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 40)));
      __m256 vacc01234567p1 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vi1x01234567, vk1x01234567), _MM_FROUND_NO_EXC));
      __m256 vacc89ABCDEFp1 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF), _MM_FROUND_NO_EXC));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
      const __m256 vi2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i2 + 8)));
      i2 += 16;

      const __m256 vk2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 48)));
      const __m256 vk2x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 56)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));
      vacc89ABCDEFp0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0), _MM_FROUND_NO_EXC));

      w += 64;

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vacc01234567p0, vacc01234567p1), _MM_FROUND_NO_EXC));
      vacc89ABCDEFp0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1), _MM_FROUND_NO_EXC));

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      __m256 vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEFp0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);
      vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vmax);

      _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc89ABCDEF, _MM_FROUND_NO_EXC));
      o += 16;
    }
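    // Tail loop: process a remaining group of 8 channels. The weights still appear
    // to be packed as a 16-wide tile, so the per-tap offsets (w+16, w+32, w+48)
    // stay fixed while w advances by only 8 per group.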
    for (; c >= 8; c -= 8) {
      __m256 vacc01234567p0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;

      const __m256 vk0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 16)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      const __m256 vk1x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 32)));
      __m256 vacc01234567p1 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vi1x01234567, vk1x01234567), _MM_FROUND_NO_EXC));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
      i2 += 8;

      const __m256 vk2x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 48)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      w += 8;

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vacc01234567p0, vacc01234567p1), _MM_FROUND_NO_EXC));

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC));
      o += 8;
    }
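    // Remainder: 1-7 channels are left. Full 8-lane loads are still used (the
    // kernel is annotated XNN_OOB_READS), and the clamped result is stored in
    // 4-, 2-, and 1-halfword pieces according to the bits of c.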
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);

      __m256 vacc01234567p0 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));

      const __m256 vk0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 16)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));

      const __m256 vk1x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 32)));
      __m256 vacc01234567p1 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vi1x01234567, vk1x01234567), _MM_FROUND_NO_EXC));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));

      const __m256 vk2x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) (w + 48)));
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0), _MM_FROUND_NO_EXC));

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vacc01234567p0, vacc01234567p1), _MM_FROUND_NO_EXC));

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      __m128i vh01234567 = _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC);
      if (c & 4) {
        _mm_storel_epi64((__m128i*) o, vh01234567);
        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
        o += 4;
      }
      if (c & 2) {
        _mm_storeu_si32(o, vh01234567);
        vh01234567 = _mm_srli_epi64(vh01234567, 32);
        o += 2;
      }
      if (c & 1) {
        *o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
        o += 1;
      }
    }

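    // Advance the output pointer to the next output pixel; output_increment is
    // applied in bytes (hence the uintptr_t arithmetic).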
    o = (uint16_t*) ((uintptr_t) o + output_increment);
  } while (--output_width != 0);
}