// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


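// Clamp each signed 8-bit element of x to the [min, max] range carried in params and
// write the results to y, processing up to 64 elements per main-loop iteration.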
void xnn_s8_vclamp_ukernel__sse2_x64(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_s8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);

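  // SSE2 has no signed-byte min/max instructions, so the kernel works in the unsigned
  // domain: inputs are XOR-ed with params->sse2.bias (effectively flipping the sign bits),
  // clamped with unsigned byte min/max against bounds biased the same way, then un-biased.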
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  const __m128i voutput_max_with_bias = _mm_load_si128((const __m128i*) params->sse2.max_with_bias);
  const __m128i voutput_min_with_bias = _mm_load_si128((const __m128i*) params->sse2.min_with_bias);
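  // Main loop: clamp 64 elements (4 vectors of 16 bytes) per iteration.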
  for (; n >= 64; n -= 64) {
    __m128i vacc0 = _mm_loadu_si128((const __m128i*) x);
    __m128i vacc1 = _mm_loadu_si128((const __m128i*) x + 1);
    __m128i vacc2 = _mm_loadu_si128((const __m128i*) x + 2);
    __m128i vacc3 = _mm_loadu_si128((const __m128i*) x + 3);
    x += 64;

    // Bias the inputs into the unsigned comparison domain.
    vacc0 = _mm_xor_si128(vacc0, vbias);
    vacc1 = _mm_xor_si128(vacc1, vbias);
    vacc2 = _mm_xor_si128(vacc2, vbias);
    vacc3 = _mm_xor_si128(vacc3, vbias);

    // Clamp against the biased bounds with unsigned byte max/min.
    vacc0 = _mm_max_epu8(vacc0, voutput_min_with_bias);
    vacc1 = _mm_max_epu8(vacc1, voutput_min_with_bias);
    vacc2 = _mm_max_epu8(vacc2, voutput_min_with_bias);
    vacc3 = _mm_max_epu8(vacc3, voutput_min_with_bias);

    vacc0 = _mm_min_epu8(vacc0, voutput_max_with_bias);
    vacc1 = _mm_min_epu8(vacc1, voutput_max_with_bias);
    vacc2 = _mm_min_epu8(vacc2, voutput_max_with_bias);
    vacc3 = _mm_min_epu8(vacc3, voutput_max_with_bias);

    // Remove the bias.
    vacc0 = _mm_xor_si128(vacc0, vbias);
    vacc1 = _mm_xor_si128(vacc1, vbias);
    vacc2 = _mm_xor_si128(vacc2, vbias);
    vacc3 = _mm_xor_si128(vacc3, vbias);

    _mm_storeu_si128((__m128i*) y, vacc0);
    _mm_storeu_si128((__m128i*) y + 1, vacc1);
    _mm_storeu_si128((__m128i*) y + 2, vacc2);
    _mm_storeu_si128((__m128i*) y + 3, vacc3);
    y += 64;
  }
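  // Clamp any remaining full 16-byte vectors one at a time.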
  for (; n >= 16; n -= 16) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    vacc = _mm_xor_si128(vacc, vbias);
    vacc = _mm_min_epu8(vacc, voutput_max_with_bias);
    vacc = _mm_max_epu8(vacc, voutput_min_with_bias);
    vacc = _mm_xor_si128(vacc, vbias);

    _mm_storeu_si128((__m128i*) y, vacc);
    y += 16;
  }
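  // Handle the final 1-15 elements. The full 16-byte load may read past the end of the
  // input buffer; XNN_OOB_READS on the function marks that out-of-bounds read as intentional.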
  if XNN_UNLIKELY(n != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) x);

    vacc = _mm_xor_si128(vacc, vbias);
    vacc = _mm_min_epu8(vacc, voutput_max_with_bias);
    vacc = _mm_max_epu8(vacc, voutput_min_with_bias);
    vacc = _mm_xor_si128(vacc, vbias);

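    // Store the clamped tail 8, 4, 2, and 1 bytes at a time, shifting the remaining
    // lanes of vacc down after each partial store.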
    if (n & 8) {
      _mm_storel_epi64((__m128i*) y, vacc);
      y += 8;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (n & 4) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vacc));
      y += 4;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (n & 2) {
      unaligned_store_u16(y, (uint16_t) _mm_cvtsi128_si32(vacc));
      y += 2;
      vacc = _mm_srli_epi32(vacc, 16);
    }
    if (n & 1) {
      *y = (int8_t) _mm_cvtsi128_si32(vacc);
    }
  }
}