xref: /aosp_15_r20/external/scrypt/lib/crypto/crypto_scrypt-sse.c (revision cd192fa97f712aaa0b7692ec7c6cc9c7ad6c8620)
1 /*-
2  * Copyright 2009 Colin Percival
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * This file was originally written by Colin Percival as part of the Tarsnap
27  * online backup system.
28  */
29 #include "scrypt_platform.h"
30 
31 #include <sys/types.h>
32 #ifndef _WIN32
33 #include <sys/mman.h>
34 #endif
35 
36 #include <emmintrin.h>
37 #include <errno.h>
38 #include <stdint.h>
39 #include <stdlib.h>
40 #include <string.h>
41 
42 #ifdef USE_OPENSSL_PBKDF2
43 #include <openssl/evp.h>
44 #else
45 #include "sha256.h"
46 #endif
47 #include "sysendian.h"
48 
49 #include "crypto_scrypt.h"
50 
51 static void blkcpy(void *, void *, size_t);
52 static void blkxor(void *, void *, size_t);
53 static void salsa20_8(__m128i *);
54 static void blockmix_salsa8(__m128i *, __m128i *, __m128i *, size_t);
55 static uint64_t integerify(void *, size_t);
56 static void smix(uint8_t *, size_t, uint64_t, void *, void *);
57 
/**
 * blkcpy(dest, src, len):
 * Copy len bytes from src to dest in 16-byte vector units.  Only
 * floor(len / 16) vectors are copied; callers pass multiples of 64.
 * Both regions must be 16-byte aligned and must not overlap.
 */
static void
blkcpy(void * dest, void * src, size_t len)
{
	__m128i * dp = dest;
	__m128i * sp = src;
	__m128i * send = sp + len / 16;

	while (sp < send)
		*dp++ = *sp++;
}
69 
/**
 * blkxor(dest, src, len):
 * XOR len bytes of src into dest in 16-byte vector units.  Only
 * floor(len / 16) vectors are processed; callers pass multiples of 64.
 * Both regions must be 16-byte aligned.
 */
static void
blkxor(void * dest, void * src, size_t len)
{
	__m128i * dp = dest;
	__m128i * sp = src;
	size_t nvec = len / 16;
	size_t k;

	for (k = 0; k < nvec; k++)
		dp[k] = _mm_xor_si128(dp[k], sp[k]);
}
81 
/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided 64-byte block.  The block is
 * expected in the SSE-shuffled word order produced by smix(), where each
 * __m128i holds one "diagonal" of the salsa20 state.
 */
static void
salsa20_8(__m128i B[4])
{
	__m128i R0, R1, R2, R3;
	__m128i tmp;
	size_t round;

	/* One quarter-step: out ^= (a + b) <<< s, per 32-bit lane. */
#define RXS(out, a, b, s) do {						\
	tmp = _mm_add_epi32((a), (b));					\
	(out) = _mm_xor_si128((out), _mm_slli_epi32(tmp, (s)));		\
	(out) = _mm_xor_si128((out), _mm_srli_epi32(tmp, 32 - (s)));	\
} while (0)

	R0 = B[0];
	R1 = B[1];
	R2 = B[2];
	R3 = B[3];

	/* 8 rounds, processed as 4 double-rounds (column + row). */
	for (round = 0; round < 8; round += 2) {
		/* Operate on "columns". */
		RXS(R1, R0, R3, 7);
		RXS(R2, R1, R0, 9);
		RXS(R3, R2, R1, 13);
		RXS(R0, R3, R2, 18);

		/* Rotate lanes so the row round sees the right diagonals. */
		R1 = _mm_shuffle_epi32(R1, 0x93);
		R2 = _mm_shuffle_epi32(R2, 0x4E);
		R3 = _mm_shuffle_epi32(R3, 0x39);

		/* Operate on "rows". */
		RXS(R3, R0, R1, 7);
		RXS(R2, R3, R0, 9);
		RXS(R1, R2, R3, 13);
		RXS(R0, R1, R2, 18);

		/* Undo the lane rotation. */
		R1 = _mm_shuffle_epi32(R1, 0x39);
		R2 = _mm_shuffle_epi32(R2, 0x4E);
		R3 = _mm_shuffle_epi32(R3, 0x93);
	}
#undef RXS

	/* Feed-forward: add the input block back in. */
	B[0] = _mm_add_epi32(B[0], R0);
	B[1] = _mm_add_epi32(B[1], R1);
	B[2] = _mm_add_epi32(B[2], R2);
	B[3] = _mm_add_epi32(B[3], R3);
}
143 
/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.  The
 * temporary space X must be 64 bytes.
 */
static void
blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
{
	size_t i;

	/* 1: X <-- B_{2r - 1} (last 64-byte sub-block). */
	blkcpy(X, &Bin[8 * r - 4], 64);

	/*
	 * 2: for i = 0 to 2r - 1 do, two sub-blocks per pass.  Step 6's
	 * interleave (Y_0, Y_2, ..., Y_1, Y_3, ...) is applied on the fly:
	 * even outputs land in the first half of Bout, odd in the second.
	 */
	for (i = 0; i < 2 * r; i += 2) {
		/* 3-4: X <-- H(X xor B_i); even Y goes to Bout[i/2]. */
		blkxor(X, &Bin[i * 4], 64);
		salsa20_8(X);
		blkcpy(&Bout[i * 2], X, 64);

		/* 3-4: X <-- H(X xor B_{i+1}); odd Y goes to the top half. */
		blkxor(X, &Bin[i * 4 + 4], 64);
		salsa20_8(X);
		blkcpy(&Bout[r * 4 + i * 2], X, 64);
	}
}
177 
/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 * (Words 0 and 13 of the last 64-byte sub-block hold the low and high
 * halves respectively, because of the SSE word shuffle used by smix().)
 */
static uint64_t
integerify(void * B, size_t r)
{
	uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);
	uint64_t hi = X[13];
	uint64_t lo = X[0];

	return ((hi << 32) + lo);
}
189 
/**
 * smix(B, r, N, V, XY):
 * Compute B = SMix_r(B, N).  The input B must be 128r bytes in length;
 * the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The arrays B, V, and XY must be aligned to a
 * multiple of 64 bytes.
 */
static void
smix(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
{
	__m128i * X = XY;
	__m128i * Y = (void *)((uintptr_t)(XY) + 128 * r);
	__m128i * Z = (void *)((uintptr_t)(XY) + 256 * r);
	uint32_t * Xw = (void *)X;
	uint64_t step, j;
	size_t w;

	/*
	 * 1: X <-- B.  The (i * 5 % 16) permutation reorders each 16-word
	 * sub-block into the diagonal layout salsa20_8() expects.
	 */
	for (w = 0; w < 2 * r; w++) {
		for (step = 0; step < 16; step++) {
			Xw[w * 16 + step] =
			    le32dec(&B[(w * 16 + (step * 5 % 16)) * 4]);
		}
	}

	/* 2: for i = 0 to N - 1 do -- fill V, two blocks per pass. */
	for (step = 0; step < N; step += 2) {
		/* 3: V_i <-- X */
		blkcpy((void *)((uintptr_t)(V) + step * 128 * r), X, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(X, Y, Z, r);

		/* 3: V_{i+1} <-- X */
		blkcpy((void *)((uintptr_t)(V) + (step + 1) * 128 * r),
		    Y, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 6: for i = 0 to N - 1 do -- random reads from V. */
	for (step = 0; step < N; step += 2) {
		/* 7: j <-- Integerify(X) mod N (N is a power of 2). */
		j = integerify(X, r) & (N - 1);

		/* 8: X <-- H(X xor V_j) */
		blkxor(X, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
		blockmix_salsa8(X, Y, Z, r);

		/* 7: j <-- Integerify(X) mod N */
		j = integerify(Y, r) & (N - 1);

		/* 8: X <-- H(X xor V_j) */
		blkxor(Y, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 10: B' <-- X, undoing the word permutation from step 1. */
	for (w = 0; w < 2 * r; w++) {
		for (step = 0; step < 16; step++) {
			le32enc(&B[(w * 16 + (step * 5 % 16)) * 4],
			    Xw[w * 16 + step]);
		}
	}
}
257 
/**
 * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen) and write the result into buf.  The parameters r, p, and buflen
 * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N
 * must be a power of 2 greater than 1.
 *
 * Return 0 on success; or -1 on error (with errno set to EFBIG, EINVAL,
 * or ENOMEM as appropriate).
 */
int
crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p,
    uint8_t * buf, size_t buflen)
{
	void * B0, * V0, * XY0;
	uint8_t * B;
	uint32_t * V;
	uint32_t * XY;
	uint32_t i;

	/* Sanity-check parameters. */
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		goto err0;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		goto err0;
	}
	/*
	 * N must be a power of 2 greater than 1.  Rejecting N < 2 (rather
	 * than only N == 0) is required for memory safety: smix() stores
	 * two V blocks per iteration, so N == 1 would write 128r bytes
	 * past the end of the 128*r*N-byte V allocation.
	 */
	if (((N & (N - 1)) != 0) || (N < 2)) {
		errno = EINVAL;
		goto err0;
	}
	/* Reject sizes whose allocations below would overflow size_t. */
	if ((r > SIZE_MAX / 128 / p) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > (SIZE_MAX - 64) / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		goto err0;
	}

	/*
	 * Allocate memory.  B is 128*r*p bytes, XY is 256*r + 64 bytes,
	 * V is 128*r*N bytes; all three must be 64-byte aligned (see
	 * smix()).  Without posix_memalign we over-allocate by 63 bytes
	 * and round the pointer up by hand; the V buffer prefers
	 * anonymous mmap (page-aligned, and MAP_NOCORE keeps the large
	 * key-derived scratch area out of core dumps where supported).
	 */
#ifdef HAVE_POSIX_MEMALIGN
	if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
		goto err0;
	B = (uint8_t *)(B0);
	if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
		goto err1;
	XY = (uint32_t *)(XY0);
#ifndef MAP_ANON
	if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
		goto err2;
	V = (uint32_t *)(V0);
#endif
#else
	if ((B0 = malloc(128 * r * p + 63)) == NULL)
		goto err0;
	B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
	if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
		goto err1;
	XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
#ifndef MAP_ANON
	if ((V0 = malloc(128 * r * N + 63)) == NULL)
		goto err2;
	V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
#endif
#endif
#ifdef MAP_ANON
	if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
#ifdef MAP_NOCORE
	    MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
#else
	    MAP_ANON | MAP_PRIVATE,
#endif
	    -1, 0)) == MAP_FAILED)
		goto err2;
	V = (uint32_t *)(V0);
#endif

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
#ifdef USE_OPENSSL_PBKDF2
	PKCS5_PBKDF2_HMAC((const char *)passwd, passwdlen, salt, saltlen, 1, EVP_sha256(), p * 128 * r, B);
#else
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);
#endif

	/* 2: for i = 0 to p - 1 do */
	for (i = 0; i < p; i++) {
		/* 3: B_i <-- MF(B_i, N); the p lanes share V and XY. */
		smix(&B[i * 128 * r], r, N, V, XY);
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
#ifdef USE_OPENSSL_PBKDF2
	PKCS5_PBKDF2_HMAC((const char *)passwd, passwdlen, B, p * 128 * r, 1, EVP_sha256(), buflen, buf);
#else
	PBKDF2_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);
#endif

	/* Free memory. */
#ifdef MAP_ANON
	if (munmap(V0, 128 * r * N))
		goto err2;
#else
	free(V0);
#endif
	free(XY0);
	free(B0);

	/* Success! */
	return (0);

err2:
	free(XY0);
err1:
	free(B0);
err0:
	/* Failure!  errno was set by the failing check or allocator. */
	return (-1);
}
381