// xref: /aosp_15_r20/external/XNNPACK/src/u8-ibilinear/gen/sse41-c16.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/s8-ibilinear/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <string.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>

xnn_u8_ibilinear_ukernel__sse41_c16(size_t output_pixels,size_t channels,const uint8_t ** restrict input,size_t input_offset,const int16_t * restrict weights,uint8_t * restrict output,size_t output_increment)19 void xnn_u8_ibilinear_ukernel__sse41_c16(
20     size_t output_pixels,
21     size_t channels,
22     const uint8_t**restrict input,
23     size_t input_offset,
24     const int16_t*restrict weights,
25     uint8_t*restrict output,
26     size_t output_increment) XNN_OOB_READS
27 {
28   assert(output_pixels != 0);
29   assert(channels != 0);
30 
31   do {
32     const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
33     const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
34     const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
35     const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
36     input += 4;
37 
38     const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
39     weights += 2;
40     __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
41     valphah = _mm_unpacklo_epi64(valphah, valphah);
42     __m128i valphav = _mm_srli_epi32(valpha, 16);
43     valphav = _mm_shuffle_epi32(valphav, _MM_SHUFFLE(0, 0, 0, 0));
44 
45     valphah = _mm_blend_epi16(valphah, _mm_sub_epi16(_mm_set1_epi32(0x08000000), valphah), 0xAA);
46 
47     const __m128i vrounding = _mm_set1_epi32(0x00200000);
48 
49     size_t c = channels;
50     for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
51       const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
52       const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
53       const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
54       const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
55       const __m128i vtl89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
56       const __m128i vtr89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
57       const __m128i vbl89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
58       const __m128i vbr89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
59       i0 += 16;
60       i1 += 16;
61       i2 += 16;
62       i3 += 16;
63 
64 
65       const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
66       const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
67       const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
68       const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
69       const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
70       const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
71       const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
72       const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
73 
74       const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
75       const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
76       const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
77       const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
78 
79       __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
80       __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
81       __m128i vacc89AB = _mm_mullo_epi32(vd89AB, valphav);
82       __m128i vaccCDEF = _mm_mullo_epi32(vdCDEF, valphav);
83 
84       vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
85       vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
86       vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
87       vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);
88 
89       vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
90       vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
91       vacc89AB = _mm_srli_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
92       vaccCDEF = _mm_srli_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);
93 
94       const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
95       const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);
96 
97       const __m128i vo0123456789ABCDEF = _mm_packus_epi16(vacc01234567, vacc89ABCDEF);
98 
99       _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
100       output += 16;
101     }
102     for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
103       const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
104       i0 += 8;
105       const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
106       i1 += 8;
107       const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
108       i2 += 8;
109       const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
110       i3 += 8;
111 
112 
113       const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
114       const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
115       const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
116       const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
117 
118       const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
119       const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
120 
121       __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
122       __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
123 
124       vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
125       vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
126 
127       vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
128       vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
129 
130       const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
131 
132       const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
133 
134       _mm_storel_epi64((__m128i*) output, vo01234567);
135       output += 8;
136     }
137     if XNN_UNLIKELY(c != 0) {
138       const __m128i vtl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
139       const __m128i vtr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
140       const __m128i vbl01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
141       const __m128i vbr01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
142 
143 
144       const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
145       const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
146       const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
147       const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
148 
149       const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
150       const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
151 
152       __m128i vacc0123 = _mm_mullo_epi32(vd0123, valphav);
153       __m128i vacc4567 = _mm_mullo_epi32(vd4567, valphav);
154 
155       vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
156       vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
157 
158       vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
159       vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
160 
161       const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
162 
163       __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);
164 
165       if (c & (4 * sizeof(uint8_t))) {
166         unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
167         output += 4;
168         vo01234567 = _mm_srli_epi64(vo01234567, 32);
169       }
170       if (c & (2 * sizeof(uint8_t))) {
171         unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vo01234567, 0));
172         output += 2;
173         vo01234567 = _mm_srli_epi32(vo01234567, 16);
174       }
175       if (c & (1 * sizeof(uint8_t))) {
176         *output++ = (uint8_t) _mm_extract_epi8(vo01234567, 0);
177       }
178     }
179 
180     output = (uint8_t*) ((uintptr_t) output + output_increment);
181   } while (--output_pixels != 0);
182 }
183