// Auto-generated file. Do not edit!
//   Template: src/f16-prelu/f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

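// f16 PReLU microkernel: per channel, output = input for non-negative inputs
// and input * weight for negative inputs. Processes 2 rows x 16 channels per
// main-loop iteration; f16 values are widened to f32 with F16C for the
// arithmetic and narrowed back to f16 on store.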
void xnn_f16_prelu_ukernel__f16c_2x16(
    size_t rows,
    size_t channels,
    const void* restrict input,
    size_t input_stride,
    const void* restrict weights,
    void* restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);

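  // Pointers to the two rows processed per outer-loop iteration.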
  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);

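  // After a 2-row tile, advance by two row strides minus the `channels` bytes
  // already consumed by the inner loops (channels and strides are in bytes).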
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  do {
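    // If only one row remains, alias row 1 to row 0: the second set of lanes
    // then recomputes row 0 instead of touching memory past the last row.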
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
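    // Main loop: 16 channels per iteration, handled as two 8-wide halves.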
    for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      const __m256 vw89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
      w += 16;

      const __m256 vi0x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      const __m256 vi0x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
      i0 += 16;
      const __m256 vi1x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      const __m256 vi1x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
      i1 += 16;

      __m256 vacc0x001234567 = _mm256_mul_ps(vi0x001234567, vw01234567);
      __m256 vacc0x089ABCDEF = _mm256_mul_ps(vi0x089ABCDEF, vw89ABCDEF);
      __m256 vacc1x001234567 = _mm256_mul_ps(vi1x001234567, vw01234567);
      __m256 vacc1x089ABCDEF = _mm256_mul_ps(vi1x089ABCDEF, vw89ABCDEF);

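      // _mm256_blendv_ps picks the scaled value wherever the input's sign bit
      // is set: input * weight for negative inputs, input itself otherwise.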
      vacc0x001234567 = _mm256_blendv_ps(vi0x001234567, vacc0x001234567, vi0x001234567);
      vacc0x089ABCDEF = _mm256_blendv_ps(vi0x089ABCDEF, vacc0x089ABCDEF, vi0x089ABCDEF);
      vacc1x001234567 = _mm256_blendv_ps(vi1x001234567, vacc1x001234567, vi1x001234567);
      vacc1x089ABCDEF = _mm256_blendv_ps(vi1x089ABCDEF, vacc1x089ABCDEF, vi1x089ABCDEF);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x001234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x089ABCDEF, _MM_FROUND_NO_EXC));
      o0 += 16;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x001234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x089ABCDEF, _MM_FROUND_NO_EXC));
      o1 += 16;
    }
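    // Remainder loop: 8 channels at a time.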
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      w += 8;

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      __m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
      __m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);

      vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
      vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      o0 += 8;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      o1 += 8;
    }
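    // Tail: 1-7 channels remain. The full-width loads may read past the end
    // of the row (permitted by XNN_OOB_READS); the stores below write only
    // the valid lanes.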
    if XNN_UNLIKELY(c != 0) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 = (const uint16_t*) ((uintptr_t) i0 + c);
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 = (const uint16_t*) ((uintptr_t) i1 + c);

      __m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
      __m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);

      vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
      vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);

      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
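      // Store 4, 2, then 1 halfwords as the low bits of `c` dictate, shifting
      // the already-stored lanes out of the vector after each step.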
      if (c & (4 * sizeof(uint16_t))) {
        _mm_storel_epi64((__m128i*) o0, vh0x01234567);
        _mm_storel_epi64((__m128i*) o1, vh1x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);

        o0 += 4;
        o1 += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        _mm_storeu_si32(o0, vh0x01234567);
        _mm_storeu_si32(o1, vh1x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        *o0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *o1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);

        o0 += 1;
        o1 += 1;
      }
    }
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
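    // doz() is difference-or-zero (saturating subtraction): max(rows - 2, 0).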
    rows = doz(rows, 2);
  } while (rows != 0);
}
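
// Usage sketch (illustrative only, not part of the generated kernel): for a
// densely packed row-major f16 tensor with `batch` rows of `C` channels, a
// single call covers the whole tensor. Note that `channels` and both strides
// are given in bytes:
//
//   xnn_f16_prelu_ukernel__f16c_2x16(
//       /*rows=*/batch,
//       /*channels=*/C * sizeof(uint16_t),
//       input, /*input_stride=*/C * sizeof(uint16_t),
//       weights,
//       output, /*output_stride=*/C * sizeof(uint16_t));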