/* adler32_ssse3.c -- compute the Adler-32 checksum of a data stream
 * Copyright (C) 1995-2011 Mark Adler
 * Authors:
 *   Adam Stylinski <[email protected]>
 *   Brian Bockelman <[email protected]>
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "../../zbuild.h"
#include "../../adler32_p.h"
#include "adler32_ssse3_p.h"

#ifdef X86_SSSE3_ADLER32

#include <immintrin.h>

Z_INTERNAL uint32_t adler32_ssse3(uint32_t adler, const unsigned char *buf, size_t len) {
    uint32_t sum2;

    /* split Adler-32 into component sums */
    sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;
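
    /* Reference recurrence being vectorized below (BASE == 65521):
     *   s1 = 1 + c[0] + c[1] + ... + c[n-1]                (mod BASE)
     *   s2 = n*1 + n*c[0] + (n-1)*c[1] + ... + 1*c[n-1]    (mod BASE)
     * and the checksum is (s2 << 16) | s1. The vector loops keep partial
     * s1/s2 terms in 32-bit lanes and fold in the modulo only once per block
     * of at most NMAX bytes. */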

    /* in case user likes doing a byte at a time, keep it fast */
    if (UNLIKELY(len == 1))
        return adler32_len_1(adler, buf, sum2);

    /* initial Adler-32 value (deferred check for len == 1 speed) */
    if (UNLIKELY(buf == NULL))
        return 1L;

    /* in case short lengths are provided, keep it somewhat fast */
    if (UNLIKELY(len < 16))
        return adler32_len_16(adler, buf, len, sum2);

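    /* Weights for the weighted byte sums that feed s2: dot2v covers the first
     * 16 bytes of a 32-byte block (weights 32..17) and dot2v_0 the last 16
     * (weights 16..1); dot2v_0 is also reused in the plain 16-byte loop.
     * dot3v is all ones so _mm_madd_epi16 just pairwise-adds the 16-bit
     * products from _mm_maddubs_epi16 into 32-bit lanes. */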
    const __m128i dot2v = _mm_setr_epi8(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17);
    const __m128i dot2v_0 = _mm_setr_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
    const __m128i dot3v = _mm_set1_epi16(1);
    const __m128i zero = _mm_setzero_si128();

    __m128i vbuf, vs1_0, vs3, vs1, vs2, vs2_0, v_sad_sum1, v_short_sum2, v_short_sum2_0,
            vbuf_0, v_sad_sum2, vsum2, vsum2_0;

    /* If the buffer is unaligned (likely), decide whether there is enough data
     * left to make the scalar aligning additions worthwhile, or whether it is
     * cheaper to just eat the cost of one unaligned load. The test is simple:
     * check whether fewer than 16 bytes would remain once len is reduced by
     * the alignment offset (16 - remainder). */
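    /* For example, with buf % 16 == 13 the alignment offset is 3; at len == 18
     * only 15 bytes would remain after the scalar pass, so the single unaligned
     * 16-byte load below is the cheaper option. */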
    size_t max_iters = NMAX;
    size_t rem = (uintptr_t)buf & 15;
    size_t align_offset = 16 - rem;
    size_t k = 0;
    if (rem) {
        if (len < 16 + align_offset) {
            /* Eat the cost of this one unaligned load so that we don't skip
             * the vectorization entirely: doing 16 bytes at a time unaligned
             * is better than falling back to 16 + up to 15 scalar sums. */
            vbuf = _mm_loadu_si128((__m128i*)buf);
            len -= 16;
            buf += 16;
            vs1 = _mm_cvtsi32_si128(adler);
            vs2 = _mm_cvtsi32_si128(sum2);
            vs3 = _mm_setzero_si128();
            vs1_0 = vs1;
            goto unaligned_jmp;
        }

        for (size_t i = 0; i < align_offset; ++i) {
            adler += *(buf++);
            sum2 += adler;
        }

        /* lop off the max number of sums based on the scalar sums done
         * above */
        len -= align_offset;
        max_iters -= align_offset;
    }

    while (len >= 16) {
        vs1 = _mm_cvtsi32_si128(adler);
        vs2 = _mm_cvtsi32_si128(sum2);
        vs3 = _mm_setzero_si128();
        vs2_0 = _mm_setzero_si128();
        vs1_0 = vs1;

        k = (len < max_iters ? len : max_iters);
        k -= k % 16;
        len -= k;
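        /* k is capped at NMAX (5552), the usual zlib bound that keeps the
         * 32-bit sums from overflowing before the modulo by BASE at the
         * bottom of this loop. */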

        while (k >= 32) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] )
               (the 32 vs1 term is accumulated in vs3 and applied after the loop)
            */
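            /* Two 16-byte halves are handled per iteration: vbuf gets the
             * 32..17 weights into vs2, vbuf_0 gets the 16..1 weights into
             * vs2_0, and vs3 collects the previous iteration's vs1 (vs1_0) so
             * the multiply by 32 can be applied once, after the loop, as a
             * shift left by 5. */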
            vbuf = _mm_load_si128((__m128i*)buf);
            vbuf_0 = _mm_load_si128((__m128i*)(buf + 16));
            buf += 32;
            k -= 32;

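            /* _mm_sad_epu8 against zero sums each 8-byte half of a vector into
             * the low 16 bits of its 64-bit lane, which is exactly the per-byte
             * sum that vs1 needs. */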
            v_sad_sum1 = _mm_sad_epu8(vbuf, zero);
            v_sad_sum2 = _mm_sad_epu8(vbuf_0, zero);
            vs1 = _mm_add_epi32(v_sad_sum1, vs1);
            vs3 = _mm_add_epi32(vs1_0, vs3);

            vs1 = _mm_add_epi32(v_sad_sum2, vs1);
            v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v);
            vsum2 = _mm_madd_epi16(v_short_sum2, dot3v);
            v_short_sum2_0 = _mm_maddubs_epi16(vbuf_0, dot2v_0);
            vs2 = _mm_add_epi32(vsum2, vs2);
            vsum2_0 = _mm_madd_epi16(v_short_sum2_0, dot3v);
            vs2_0 = _mm_add_epi32(vsum2_0, vs2_0);
            vs1_0 = vs1;
        }

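        /* Fold the second accumulator into vs2 and apply the deferred multiply
         * by 32 (shift left by 5) to the accumulated vs1 history in vs3. */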
        vs2 = _mm_add_epi32(vs2_0, vs2);
        vs3 = _mm_slli_epi32(vs3, 5);
        vs2 = _mm_add_epi32(vs3, vs2);
        vs3 = _mm_setzero_si128();

        while (k >= 16) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 16 vs1 + sum( (16-i+1) c[i] )
            */
            vbuf = _mm_load_si128((__m128i*)buf);
            buf += 16;
            k -= 16;

unaligned_jmp:
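            /* The short unaligned-buffer path above jumps here with vbuf
             * already loaded and vs1/vs2/vs3/vs1_0 already initialized. */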
            v_sad_sum1 = _mm_sad_epu8(vbuf, zero);
            vs1 = _mm_add_epi32(v_sad_sum1, vs1);
            vs3 = _mm_add_epi32(vs1_0, vs3);
            v_short_sum2 = _mm_maddubs_epi16(vbuf, dot2v_0);
            vsum2 = _mm_madd_epi16(v_short_sum2, dot3v);
            vs2 = _mm_add_epi32(vsum2, vs2);
            vs1_0 = vs1;
        }

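        /* In the 16-byte loop the deferred vs1 multiply is by 16, hence the
         * shift left by 4 before folding vs3 into vs2. */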
        vs3 = _mm_slli_epi32(vs3, 4);
        vs2 = _mm_add_epi32(vs2, vs3);

        /* We don't actually need a full horizontal sum for vs1, since psadbw
         * already did a partial reduction implicitly, leaving sums only in
         * 32-bit positions 0 and 2 of the vector. This saves some contention
         * on the shuffle port(s). */
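        /* Both sums are reduced modulo BASE (65521) here so the next block of
         * up to NMAX bytes starts from small values again. */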
        adler = partial_hsum(vs1) % BASE;
        sum2 = hsum(vs2) % BASE;
        max_iters = NMAX;
    }

    /* Process tail (len < 16).  */
    return adler32_len_16(adler, buf, len, sum2);
}

#endif