// Auto-generated file. Do not edit!
//   Template: src/s8-ibilinear/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/unaligned.h>


void xnn_u8_ibilinear_ukernel__sse2_c16(
    size_t output_pixels,
    size_t channels,
    const uint8_t**restrict input,
    size_t input_offset,
    const int16_t*restrict weights,
    uint8_t*restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

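  // Each iteration interpolates one output pixel. The four input pointers address the
  // neighbouring samples: i0 = top-left, i1 = top-right, i2 = bottom-left, i3 = bottom-right.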
  do {
    const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
    const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
    const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
    const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

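    // Load the per-pixel weights: alpha_h (horizontal) and alpha_v (vertical), both 11-bit
    // fixed-point values. valphah is turned into [alpha_h, 2048 - alpha_h] pairs (the XOR with
    // 0xFFFF and add of 0x0801 in the high halves compute 2048 - alpha_h), so a single
    // _mm_madd_epi16 against interleaved (right, left) samples blends them horizontally.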
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);

    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));

    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
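    // Main loop: interpolate 16 channels per iteration.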
    for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vtl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      __m128i vtr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      __m128i vbl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      __m128i vbr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;

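      // Zero-extend the uint8_t samples to 16 bits.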
      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);
      vtl89ABCDEF = _mm_unpacklo_epi8(vtl89ABCDEF, vzero);
      vtr89ABCDEF = _mm_unpacklo_epi8(vtr89ABCDEF, vzero);
      vbl89ABCDEF = _mm_unpacklo_epi8(vbl89ABCDEF, vzero);
      vbr89ABCDEF = _mm_unpacklo_epi8(vbr89ABCDEF, vzero);

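      // Interpolate the top row horizontally (vt = alpha_h*tr + (2048 - alpha_h)*tl) and
      // compute the bottom-minus-top deltas, then interpolate those deltas horizontally (vd).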
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
      const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
      const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);

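      // Vertical interpolation: vacc = vd * alpha_v. The 32-bit product is assembled from
      // 16-bit mulhi/mullo halves because SSE2 has no packed 32-bit multiply-low.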
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      __m128i vacc89AB = _mm_slli_epi32(_mm_mulhi_epu16(vd89AB, valphav), 16);
      __m128i vaccCDEF = _mm_slli_epi32(_mm_mulhi_epu16(vdCDEF, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc89AB = _mm_add_epi16(_mm_mullo_epi16(vd89AB, valphav), vacc89AB);
      vaccCDEF = _mm_add_epi16(_mm_mullo_epi16(vdCDEF, valphav), vaccCDEF);

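      // Add the top-row interpolation (Q11) scaled by 2^11 so everything is in Q22 fixed
      // point, then round to nearest and shift down to an 8-bit result.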
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
      vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);

      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      vacc89AB = _mm_srli_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
      vaccCDEF = _mm_srli_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);

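      // Pack the 32-bit results to 16 bits with signed saturation, then to 8 bits with
      // unsigned saturation, and store 16 output bytes.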
      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);

      const __m128i vo0123456789ABCDEF = _mm_packus_epi16(vacc01234567, vacc89ABCDEF);

      _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
      output += 16;
    }
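    // Remainder loop: same computation on a single group of 8 channels.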
    for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);

      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);

      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);

      const __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
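    // Tail: 1-7 remaining channels. The 8-byte loads may read past the last valid channel
    // (hence the XNN_OOB_READS annotation on the kernel); only c bytes are written back.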
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);

      __m128i vzero = _mm_setzero_si128();
      vtl01234567 = _mm_unpacklo_epi8(vtl01234567, vzero);
      vtr01234567 = _mm_unpacklo_epi8(vtr01234567, vzero);
      vbl01234567 = _mm_unpacklo_epi8(vbl01234567, vzero);
      vbr01234567 = _mm_unpacklo_epi8(vbr01234567, vzero);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);

      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);

      vacc0123 = _mm_srli_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srli_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);

      __m128i vo01234567 = _mm_packus_epi16(vacc01234567, vacc01234567);

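      // Write the remaining interpolated bytes in chunks of 4, 2, and 1,
      // depending on the low bits of c.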
      if (c & (4 * sizeof(uint8_t))) {
        unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vo01234567));
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(uint8_t))) {
        unaligned_store_u16(output, (uint16_t) vo0123);
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(uint8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}