/*
 * Hardware-accelerated CRC-32 variants for Linux on z Systems
 *
 * Use the z/Architecture Vector Extension Facility to accelerate the
 * computation of bitreflected CRC-32 checksums.
 *
 * This CRC-32 implementation is bitreflected and processes the
 * least-significant bit first (little-endian).
 *
 * This code was originally written by Hendrik Brueckner
 * <[email protected]> for use in the Linux kernel and has been
 * relicensed under the zlib license.
 */

#include "../../zbuild.h"
#include "crc32_braid_p.h"

#include <vecintrin.h>

typedef unsigned char uv16qi __attribute__((vector_size(16)));
typedef unsigned int uv4si __attribute__((vector_size(16)));
typedef unsigned long long uv2di __attribute__((vector_size(16)));

static uint32_t crc32_le_vgfm_16(uint32_t crc, const unsigned char *buf, size_t len) {
    /*
     * The CRC-32 constant block contains reduction constants to fold and
     * process particular chunks of the input data stream in parallel.
     *
     * For the CRC-32 variants, the constants are precomputed according to
     * these definitions:
     *
     *      R1 = [(x^(4*128+32) mod P'(x) << 32)]' << 1
     *      R2 = [(x^(4*128-32) mod P'(x) << 32)]' << 1
     *      R3 = [(x^(128+32) mod P'(x) << 32)]'   << 1
     *      R4 = [(x^(128-32) mod P'(x) << 32)]'   << 1
     *      R5 = [(x^64 mod P'(x) << 32)]'         << 1
     *      R6 = [(x^32 mod P'(x) << 32)]'         << 1
     *
     *      The bitreflected Barrett reduction constant, u', is defined as
     *      the bit reversal of floor(x^64 / P(x)),
     *
     *      where P(x) is the polynomial in the normal domain and P'(x) is
     *      the polynomial in the reversed (bitreflected) domain.
     *
     * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
     *
     *      P(x)  = 0x04C11DB7
     *      P'(x) = 0xEDB88320
     */
    const uv16qi perm_le2be = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};  /* BE->LE mask */
    const uv2di r2r1 = {0x1C6E41596, 0x154442BD4};                                     /* R2, R1 */
    const uv2di r4r3 = {0x0CCAA009E, 0x1751997D0};                                     /* R4, R3 */
    const uv2di r5 = {0, 0x163CD6124};                                                 /* R5 */
    const uv2di ru_poly = {0, 0x1F7011641};                                            /* u' */
    const uv2di crc_poly = {0, 0x1DB710641};                                           /* P'(x) << 1 */
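
    /*
     * For reference, the 33-bit constants above can be reproduced in the
     * normal domain with plain scalar code and then bitreflected.  This is
     * an illustrative sketch only (not part of the build); xnmodp() and
     * reflect32() are hypothetical helpers:
     *
     *     // x^n mod P(x), with P(x) = x^32 + 0x04C11DB7
     *     static uint32_t xnmodp(uint64_t n) {
     *         uint32_t r = 1;
     *         while (n--)
     *             r = (r << 1) ^ ((r & 0x80000000) ? 0x04C11DB7 : 0);
     *         return r;
     *     }
     *
     *     // bit-reverse a 32-bit value
     *     static uint32_t reflect32(uint32_t v) {
     *         uint32_t r = 0;
     *         for (int i = 0; i < 32; i++, v >>= 1)
     *             r = (r << 1) | (v & 1);
     *         return r;
     *     }
     *
     * For example, (uint64_t)reflect32(xnmodp(64)) << 1 == 0x163CD6124
     * reproduces R5; the remaining entries follow from the exponents in
     * the definitions above.
     */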

    /*
     * Load the initial CRC value.
     *
     * The CRC value is loaded into the rightmost word of the
     * vector register and is later XORed with the LSB portion
     * of the loaded input data.
     */
    uv2di v0 = {0, 0};
    v0 = (uv2di)vec_insert(crc, (uv4si)v0, 3);
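
    /* For example, crc = 0x12345678 yields v0 = {0x0000000000000000,
     * 0x0000000012345678}: the CRC occupies word element 3, the
     * rightmost word. */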

    /* Load a 64-byte data chunk and XOR with CRC */
    uv2di v1 = vec_perm(((uv2di *)buf)[0], ((uv2di *)buf)[0], perm_le2be);
    uv2di v2 = vec_perm(((uv2di *)buf)[1], ((uv2di *)buf)[1], perm_le2be);
    uv2di v3 = vec_perm(((uv2di *)buf)[2], ((uv2di *)buf)[2], perm_le2be);
    uv2di v4 = vec_perm(((uv2di *)buf)[3], ((uv2di *)buf)[3], perm_le2be);

    v1 ^= v0;
    buf += 64;
    len -= 64;

    while (len >= 64) {
        /* Load the next 64-byte data chunk */
        uv16qi part1 = vec_perm(((uv16qi *)buf)[0], ((uv16qi *)buf)[0], perm_le2be);
        uv16qi part2 = vec_perm(((uv16qi *)buf)[1], ((uv16qi *)buf)[1], perm_le2be);
        uv16qi part3 = vec_perm(((uv16qi *)buf)[2], ((uv16qi *)buf)[2], perm_le2be);
        uv16qi part4 = vec_perm(((uv16qi *)buf)[3], ((uv16qi *)buf)[3], perm_le2be);

        /*
         * Perform a GF(2) multiplication of the doublewords in V1 with
         * the R1 and R2 reduction constants in r2r1.  The intermediate result
         * is then folded (accumulated) with the next data chunk in PART1 and
         * stored in V1.  Repeat this step for the register contents
         * in V2, V3, and V4 respectively.
         */
        v1 = (uv2di)vec_gfmsum_accum_128(r2r1, v1, part1);
        v2 = (uv2di)vec_gfmsum_accum_128(r2r1, v2, part2);
        v3 = (uv2di)vec_gfmsum_accum_128(r2r1, v3, part3);
        v4 = (uv2di)vec_gfmsum_accum_128(r2r1, v4, part4);
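
        /*
         * In effect, each of the four statements above computes
         *     vN = clmul(R2, vN[0]) ^ clmul(R1, vN[1]) ^ partN,
         * where clmul() denotes a 64x64 -> 128-bit carryless multiply
         * and [0]/[1] are the leftmost/rightmost doublewords.
         */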

        buf += 64;
        len -= 64;
    }

    /*
     * Fold V1 to V4 into a single 128-bit value in V1: multiply V1 with R3
     * and R4 and accumulate the next 128-bit chunk until a single 128-bit
     * value remains.
     */
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v3);
    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v4);

    while (len >= 16) {
        /* Load next data chunk */
        v2 = vec_perm(*(uv2di *)buf, *(uv2di *)buf, perm_le2be);

        /* Fold next data chunk */
        v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);

        buf += 16;
        len -= 16;
    }

    /*
     * Set up a vector register for byte shifts.  The shift value must
     * be loaded in bits 1-4 in byte element 7 of a vector register.
     * Shift by 8 bytes: 0x40
     * Shift by 4 bytes: 0x20
     */
    uv16qi v9 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    v9 = vec_insert((unsigned char)0x40, v9, 7);

    /*
     * Prepare V0 for the next GF(2) multiplication: shift the R4R3
     * constant right by 8 bytes to move R4 into the rightmost doubleword
     * of V0 and set the leftmost doubleword to 0x1.
     */
    v0 = vec_srb(r4r3, (uv2di)v9);
    v0[0] = 1;

    /*
     * Compute the GF(2) product of V1 and V0.  The rightmost doubleword
     * of V1 is multiplied with R4.  The leftmost doubleword of V1 is
     * multiplied by 0x1 and is then XORed with the rightmost product.
     * Implicitly, the intermediate leftmost product is padded with
     * zeroes to 128 bits.
     */
    v1 = (uv2di)vec_gfmsum_128(v0, v1);
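
    /* In effect: v1 = (v1[0] zero-padded to 128 bits) ^ clmul(R4, v1[1]). */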

    /*
     * Now do the final 32-bit fold by multiplying the rightmost word
     * in V1 with R5 and XORing the result with the remaining bits of V1.
     *
     * To achieve this by a single VGFMAG, right shift V1 by a word
     * and store the result in V2 which is then accumulated.  Use the
     * vector unpack instruction to load the rightmost half of the
     * doubleword into the rightmost doubleword element of V1; the other
     * half is loaded in the leftmost doubleword.
     * The r5 constant vector contains the R5 constant in the
     * rightmost doubleword and the leftmost doubleword is zero to ignore
     * the leftmost product of V1.
     */
    v9 = vec_insert((unsigned char)0x20, v9, 7);
    v2 = vec_srb(v1, (uv2di)v9);
    v1 = vec_unpackl((uv4si)v1);  /* Split rightmost doubleword */
    v1 = (uv2di)vec_gfmsum_accum_128(r5, v1, (uv16qi)v2);
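
    /* In effect: v1 = clmul(R5, w3) ^ (v1 >> 32), where w3 is the old
     * rightmost word of v1. */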

    /*
     * Apply a Barrett reduction to compute the final 32-bit CRC value.
     *
     * The input values to the Barrett reduction are the degree-63 polynomial
     * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
     * constant u.  The Barrett reduction result is the CRC value of R(x) mod
     * P(x).
     *
     * The Barrett reduction algorithm is defined as:
     *
     *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
     *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
     *    3. C(x)  = R(x) XOR T2(x) mod x^32
     *
     *  Note: The leftmost doubleword of the vector register containing
     *  ru_poly is zero and, thus, the intermediate GF(2) product
     *  is zero and does not contribute to the final result.
     */
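
    /*
     * For illustration, the same reduction in scalar form, mirroring the
     * two VGFM operations below.  Here r is the folded 64-bit remainder
     * from the rightmost doubleword of V1, and clmul() is a hypothetical
     * 64-bit carryless multiply (both products fit in 64 bits):
     *
     *     uint64_t t1 = clmul(r & 0xFFFFFFFF, 0x1F7011641ULL);   // T1(x)
     *     uint64_t t2 = clmul(t1 & 0xFFFFFFFF, 0x1DB710641ULL);  // T2(x)
     *     uint32_t c  = (uint32_t)((r ^ t2) >> 32);              // C(x)
     *
     * In the bitreflected domain, floor(X(x) / x^32) corresponds to the
     * rightmost word, hence the masks; the final "mod x^32" keeps the
     * leftmost word of the doubleword, hence the right shift by 32.
     */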

    /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
    v2 = vec_unpackl((uv4si)v1);
    v2 = (uv2di)vec_gfmsum_128(ru_poly, v2);

    /*
     * Compute the GF(2) product of the CRC polynomial with T1(x) in
     * V2 and XOR the intermediate result, T2(x), with the value in V1.
     * The final result is stored in word element 2 of V2.
     */
    v2 = vec_unpackl((uv4si)v2);
    v2 = (uv2di)vec_gfmsum_accum_128(crc_poly, v2, (uv16qi)v1);

    return ((uv4si)v2)[2];
}

#define VX_MIN_LEN 64
#define VX_ALIGNMENT 16L
#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
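
/*
 * crc32_le_vgfm_16() requires a 16-byte-aligned buffer and a length that
 * is a multiple of 16 and at least 64 bytes.  The scalar braid
 * implementation handles short inputs, the bytes needed to reach
 * alignment, and any tail bytes.  The VX_MIN_LEN + VX_ALIGN_MASK check
 * below guarantees that at least VX_MIN_LEN aligned bytes remain after
 * the pre-alignment step.
 */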

uint32_t Z_INTERNAL PREFIX(s390_crc32_vx)(uint32_t crc, const unsigned char *buf, uint64_t len) {
    uint64_t prealign, aligned, remaining;

    if (len < VX_MIN_LEN + VX_ALIGN_MASK)
        return crc32_braid(crc, buf, len);

    if ((uintptr_t)buf & VX_ALIGN_MASK) {
        prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);
        len -= prealign;
        crc = crc32_braid(crc, buf, prealign);
        buf += prealign;
    }
    aligned = len & ~VX_ALIGN_MASK;
    remaining = len & VX_ALIGN_MASK;

    crc = crc32_le_vgfm_16(crc ^ 0xffffffff, buf, (size_t)aligned) ^ 0xffffffff;

    if (remaining)
        crc = crc32_braid(crc, buf + aligned, remaining);

    return crc;
}
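
/*
 * Usage sketch (hypothetical; in zlib-ng this routine is normally reached
 * through the functable dispatch once the vector facility is detected):
 *
 *     uint32_t crc = PREFIX(s390_crc32_vx)(0, data, data_len);
 *
 * An initial value of 0 yields the standard CRC-32; the 0xffffffff
 * pre-/post-conditioning is applied internally (here for the vector path,
 * and inside crc32_braid() for the scalar paths).
 */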