// Auto-generated file. Do not edit!
//   Template: src/f16-gavgpool/unipass-f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>

void xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8(
    size_t rows,
    size_t channels,
    const void* input,
    size_t input_stride,
    const void* zero,
    void* output,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

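  // Set up the 7 input row pointers. Rows past the valid `rows` count are
  // redirected to the `zero` buffer, so they add nothing to the sum.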
  const uint16_t* i0 = input;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = (const uint16_t*) zero;
  }
  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = (const uint16_t*) zero;
  }
  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = (const uint16_t*) zero;
  }
  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = (const uint16_t*) zero;
  }
  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = (const uint16_t*) zero;
  }
  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = (const uint16_t*) zero;
  }
  uint16_t* o = (uint16_t*) output;

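  // Load the averaging scale and the output clamping bounds; each is stored
  // in the params struct as 8 identical fp32 lanes.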
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
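  // Main loop: 8 channels per iteration. F16C provides only fp16<->fp32
  // conversion, so every addition happens in fp32 and the sum is immediately
  // rounded back to fp16, keeping the accumulator at fp16 precision.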
  for (; channels >= 8; channels -= 8) {
    const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
    i0 += 8;
    const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
    i1 += 8;

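    // Sum rows 0 and 1 first, then fold rows 2-6 into the accumulator one at
    // a time, interleaving loads with additions.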
    const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);
    i2 += 8;

    const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
    i3 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
    i4 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
    i5 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
    const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
    i6 += 8;
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
    vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);

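    // Multiply the sum by the scale (the reciprocal of the pooling size) to
    // form the average.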
    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);

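    // Clamp the average to the [min, max] output range.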
    __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);

    vout01234567 = _mm256_min_ps(vout01234567, vmax);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC));
    o += 8;
  }
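  // Remainder: 1-7 channels are left. Full 8-element loads are still used;
  // reading past the valid channels is sanctioned by XNN_OOB_READS, and the
  // extra lanes are never stored.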
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));

      const __m256 vi2x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
      __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vi0x01234567, vi1x01234567), _MM_FROUND_NO_EXC);

      const __m256 vi3x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi2x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi4x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi3x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi5x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi4x01234567), _MM_FROUND_NO_EXC);
      const __m256 vi6x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi5x01234567), _MM_FROUND_NO_EXC);
      vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(_mm256_cvtph_ps(vacc01234567), vi6x01234567), _MM_FROUND_NO_EXC);

      vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vscale), _MM_FROUND_NO_EXC);
      __m256 vout01234567 = _mm256_max_ps(_mm256_cvtph_ps(vacc01234567), vmin);
      vout01234567 = _mm256_min_ps(vout01234567, vmax);

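      // Store the remaining 1-7 fp16 results in 4-, 2-, and 1-element pieces,
      // shifting consumed lanes out of the vector between stores.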
      __m128i vh01234567 = _mm256_cvtps_ph(vout01234567, _MM_FROUND_NO_EXC);
      if (channels & 4) {
        _mm_storel_epi64((__m128i*) o, vh01234567);
        o += 4;
        vh01234567 = _mm_unpackhi_epi64(vh01234567, vh01234567);
      }
      if (channels & 2) {
        _mm_storeu_si32(o, vh01234567);
        o += 2;
        vh01234567 = _mm_srli_epi64(vh01234567, 32);
      }
      if (channels & 1) {
        *o = (uint16_t) _mm_extract_epi16(vh01234567, 0);
      }
    }
  }
}