xref: /aosp_15_r20/external/coreboot/src/lib/xxhash.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
/* SPDX-License-Identifier: BSD-2-Clause */
2 
/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * You can contact the author at:
 * - xxHash homepage: http://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
11 
#include <arch/byteorder.h>
#include <endian.h>
#include <string.h>
#include <xxhash.h>
16 
/*-*************************************
 * Macros
 **************************************/
/*
 * Rotate-left helpers. Arguments and the whole expansion are fully
 * parenthesized so that expression arguments (e.g. "r + 1") expand
 * correctly. r must be in [1, width-1]; a rotation by 0 would shift
 * by the full type width, which is undefined behavior.
 */
#define xxh_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#define xxh_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))
22 
/*-*************************************
 * Constants
 **************************************/
/* Multiplicative constants used by the 32-bit hash rounds and finalizer. */
static const uint32_t PRIME32_1 = 2654435761U;
static const uint32_t PRIME32_2 = 2246822519U;
static const uint32_t PRIME32_3 = 3266489917U;
static const uint32_t PRIME32_4 =  668265263U;
static const uint32_t PRIME32_5 =  374761393U;

/* Multiplicative constants used by the 64-bit hash rounds and finalizer. */
static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 =  1609587929392839161ULL;
static const uint64_t PRIME64_4 =  9650029242287828579ULL;
static const uint64_t PRIME64_5 =  2870177450012600261ULL;
37 
/*-**************************
 *  Utils
 ***************************/
xxh32_copy_state(struct xxh32_state * dst,const struct xxh32_state * src)41 void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
42 {
43 	memcpy(dst, src, sizeof(*dst));
44 }
45 
xxh64_copy_state(struct xxh64_state * dst,const struct xxh64_state * src)46 void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
47 {
48 	memcpy(dst, src, sizeof(*dst));
49 }
50 
/*
 * Read a 32-bit little-endian value from a possibly unaligned pointer.
 *
 * Assembling the value byte-by-byte avoids the unaligned uint32_t load
 * of the original (undefined behavior on strict-alignment targets and a
 * strict-aliasing violation) and is endian-independent, so no byte-swap
 * helper is needed.
 */
static uint32_t xxh_get_unaligned_le32(const void *p)
{
	const uint8_t *b = (const uint8_t *)p;

	return ((uint32_t)b[0]) |
	       ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) |
	       ((uint32_t)b[3] << 24);
}
56 
/*
 * Read a 64-bit little-endian value from a possibly unaligned pointer.
 *
 * Byte-by-byte assembly avoids the unaligned uint64_t load of the
 * original (undefined behavior on strict-alignment targets and a
 * strict-aliasing violation) and is endian-independent.
 */
static uint64_t xxh_get_unaligned_le64(const void *p)
{
	const uint8_t *b = (const uint8_t *)p;

	return ((uint64_t)b[0]) |
	       ((uint64_t)b[1] << 8) |
	       ((uint64_t)b[2] << 16) |
	       ((uint64_t)b[3] << 24) |
	       ((uint64_t)b[4] << 32) |
	       ((uint64_t)b[5] << 40) |
	       ((uint64_t)b[6] << 48) |
	       ((uint64_t)b[7] << 56);
}
62 
/*-***************************
 * Simple Hash Functions
 ****************************/
/* One xxh32 lane step: absorb a 4-byte word into the accumulator. */
static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
{
	uint32_t acc = seed + input * PRIME32_2;

	acc = (acc << 13) | (acc >> 19);	/* rotate left by 13 */
	return acc * PRIME32_1;
}
73 
xxh32(const void * input,const size_t len,const uint32_t seed)74 uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
75 {
76 	const uint8_t *p = (const uint8_t *)input;
77 	const uint8_t *b_end = p + len;
78 	uint32_t h32;
79 
80 	if (len >= 16) {
81 		const uint8_t *const limit = b_end - 16;
82 		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
83 		uint32_t v2 = seed + PRIME32_2;
84 		uint32_t v3 = seed + 0;
85 		uint32_t v4 = seed - PRIME32_1;
86 
87 		do {
88 			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
89 			p += 4;
90 			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
91 			p += 4;
92 			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
93 			p += 4;
94 			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
95 			p += 4;
96 		} while (p <= limit);
97 
98 		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
99 			xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
100 	} else {
101 		h32 = seed + PRIME32_5;
102 	}
103 
104 	h32 += (uint32_t)len;
105 
106 	while (p + 4 <= b_end) {
107 		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
108 		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
109 		p += 4;
110 	}
111 
112 	while (p < b_end) {
113 		h32 += (*p) * PRIME32_5;
114 		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
115 		p++;
116 	}
117 
118 	h32 ^= h32 >> 15;
119 	h32 *= PRIME32_2;
120 	h32 ^= h32 >> 13;
121 	h32 *= PRIME32_3;
122 	h32 ^= h32 >> 16;
123 
124 	return h32;
125 }
126 
/* One xxh64 lane step: absorb an 8-byte word into the accumulator. */
static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
	uint64_t v = acc + input * PRIME64_2;

	v = (v << 31) | (v >> 33);	/* rotate left by 31 */
	return v * PRIME64_1;
}
134 
/* Fold one finished lane value into the 64-bit accumulator. */
static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
{
	acc ^= xxh64_round(0, val);
	return acc * PRIME64_1 + PRIME64_4;
}
142 
xxh64(const void * input,const size_t len,const uint64_t seed)143 uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
144 {
145 	const uint8_t *p = (const uint8_t *)input;
146 	const uint8_t *const b_end = p + len;
147 	uint64_t h64;
148 
149 	if (len >= 32) {
150 		const uint8_t *const limit = b_end - 32;
151 		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
152 		uint64_t v2 = seed + PRIME64_2;
153 		uint64_t v3 = seed + 0;
154 		uint64_t v4 = seed - PRIME64_1;
155 
156 		do {
157 			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
158 			p += 8;
159 			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
160 			p += 8;
161 			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
162 			p += 8;
163 			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
164 			p += 8;
165 		} while (p <= limit);
166 
167 		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
168 			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
169 		h64 = xxh64_merge_round(h64, v1);
170 		h64 = xxh64_merge_round(h64, v2);
171 		h64 = xxh64_merge_round(h64, v3);
172 		h64 = xxh64_merge_round(h64, v4);
173 
174 	} else {
175 		h64  = seed + PRIME64_5;
176 	}
177 
178 	h64 += (uint64_t)len;
179 
180 	while (p + 8 <= b_end) {
181 		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));
182 
183 		h64 ^= k1;
184 		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
185 		p += 8;
186 	}
187 
188 	if (p + 4 <= b_end) {
189 		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
190 		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
191 		p += 4;
192 	}
193 
194 	while (p < b_end) {
195 		h64 ^= (*p) * PRIME64_5;
196 		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
197 		p++;
198 	}
199 
200 	h64 ^= h64 >> 33;
201 	h64 *= PRIME64_2;
202 	h64 ^= h64 >> 29;
203 	h64 *= PRIME64_3;
204 	h64 ^= h64 >> 32;
205 
206 	return h64;
207 }
208 
/*-**************************************************
 * Advanced Hash Functions
 ***************************************************/
xxh32_reset(struct xxh32_state * statePtr,const uint32_t seed)212 void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
213 {
214 	/* use a local state for memcpy() to avoid strict-aliasing warnings */
215 	struct xxh32_state state;
216 
217 	memset(&state, 0, sizeof(state));
218 	state.v1 = seed + PRIME32_1 + PRIME32_2;
219 	state.v2 = seed + PRIME32_2;
220 	state.v3 = seed + 0;
221 	state.v4 = seed - PRIME32_1;
222 	memcpy(statePtr, &state, sizeof(state));
223 }
224 
xxh64_reset(struct xxh64_state * statePtr,const uint64_t seed)225 void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
226 {
227 	/* use a local state for memcpy() to avoid strict-aliasing warnings */
228 	struct xxh64_state state;
229 
230 	memset(&state, 0, sizeof(state));
231 	state.v1 = seed + PRIME64_1 + PRIME64_2;
232 	state.v2 = seed + PRIME64_2;
233 	state.v3 = seed + 0;
234 	state.v4 = seed - PRIME64_1;
235 	memcpy(statePtr, &state, sizeof(state));
236 }
237 
/*
 * Feed len bytes of input into a streaming xxh32 state.
 *
 * Bytes are buffered in state->mem32 until a full 16-byte stripe is
 * available; complete stripes are consumed four 4-byte lanes at a time.
 * Returns 0 on success, -1 if input is NULL.
 */
int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -1;

	state->total_len_32 += (uint32_t)len;
	/* Remember whether >= 16 bytes were hashed overall; the digest
	 * uses this to pick the long- or short-input finalization. */
	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);

	if (state->memsize + len < 16) { /* fill in tmp buffer */
		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* some data left from previous update */
		const uint32_t *p32 = state->mem32;

		/* Top the buffer up to a full 16-byte stripe... */
		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
			16 - state->memsize);

		/* ...and consume it, one 4-byte word per lane. */
		state->v1 = xxh32_round(state->v1, xxh_get_unaligned_le32(p32));
		p32++;
		state->v2 = xxh32_round(state->v2, xxh_get_unaligned_le32(p32));
		p32++;
		state->v3 = xxh32_round(state->v3, xxh_get_unaligned_le32(p32));
		p32++;
		state->v4 = xxh32_round(state->v4, xxh_get_unaligned_le32(p32));
		p32++;

		/* Advance past the input bytes that completed the stripe;
		 * memsize must still hold its old value here. */
		p += 16-state->memsize;
		state->memsize = 0;
	}

	if (p <= b_end - 16) {
		/* Bulk phase: consume every remaining full 16-byte stripe. */
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = state->v1;
		uint32_t v2 = state->v2;
		uint32_t v3 = state->v3;
		uint32_t v4 = state->v4;

		do {
			v1 = xxh32_round(v1, xxh_get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, xxh_get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, xxh_get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, xxh_get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		/* Stash the < 16 leftover bytes for the next call/digest. */
		memcpy(state->mem32, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end-p);
	}

	return 0;
}
305 
xxh32_digest(const struct xxh32_state * state)306 uint32_t xxh32_digest(const struct xxh32_state *state)
307 {
308 	const uint8_t *p = (const uint8_t *)state->mem32;
309 	const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
310 		state->memsize;
311 	uint32_t h32;
312 
313 	if (state->large_len) {
314 		h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
315 			xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
316 	} else {
317 		h32 = state->v3 /* == seed */ + PRIME32_5;
318 	}
319 
320 	h32 += state->total_len_32;
321 
322 	while (p + 4 <= b_end) {
323 		h32 += xxh_get_unaligned_le32(p) * PRIME32_3;
324 		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
325 		p += 4;
326 	}
327 
328 	while (p < b_end) {
329 		h32 += (*p) * PRIME32_5;
330 		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
331 		p++;
332 	}
333 
334 	h32 ^= h32 >> 15;
335 	h32 *= PRIME32_2;
336 	h32 ^= h32 >> 13;
337 	h32 *= PRIME32_3;
338 	h32 ^= h32 >> 16;
339 
340 	return h32;
341 }
342 
/*
 * Feed len bytes of input into a streaming xxh64 state.
 *
 * Bytes are buffered in state->mem64 until a full 32-byte stripe is
 * available; complete stripes are consumed four 8-byte lanes at a time.
 * Returns 0 on success, -1 if input is NULL.
 */
int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -1;

	state->total_len += len;

	if (state->memsize + len < 32) { /* fill in tmp buffer */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* tmp buffer is full */
		uint64_t *p64 = state->mem64;

		/* Top the buffer up to a full 32-byte stripe... */
		memcpy(((uint8_t *)p64) + state->memsize, input,
			32 - state->memsize);

		/* ...and consume it, one 8-byte word per lane. */
		state->v1 = xxh64_round(state->v1, xxh_get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, xxh_get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, xxh_get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, xxh_get_unaligned_le64(p64));

		/* Advance past the input bytes that completed the stripe;
		 * memsize must still hold its old value here. */
		p += 32 - state->memsize;
		state->memsize = 0;
	}

	if (p + 32 <= b_end) {
		/* Bulk phase: consume every remaining full 32-byte stripe. */
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, xxh_get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, xxh_get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, xxh_get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, xxh_get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		/* Stash the < 32 leftover bytes for the next call/digest. */
		memcpy(state->mem64, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}
408 
xxh64_digest(const struct xxh64_state * state)409 uint64_t xxh64_digest(const struct xxh64_state *state)
410 {
411 	const uint8_t *p = (const uint8_t *)state->mem64;
412 	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
413 		state->memsize;
414 	uint64_t h64;
415 
416 	if (state->total_len >= 32) {
417 		const uint64_t v1 = state->v1;
418 		const uint64_t v2 = state->v2;
419 		const uint64_t v3 = state->v3;
420 		const uint64_t v4 = state->v4;
421 
422 		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
423 			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
424 		h64 = xxh64_merge_round(h64, v1);
425 		h64 = xxh64_merge_round(h64, v2);
426 		h64 = xxh64_merge_round(h64, v3);
427 		h64 = xxh64_merge_round(h64, v4);
428 	} else {
429 		h64  = state->v3 + PRIME64_5;
430 	}
431 
432 	h64 += (uint64_t)state->total_len;
433 
434 	while (p + 8 <= b_end) {
435 		const uint64_t k1 = xxh64_round(0, xxh_get_unaligned_le64(p));
436 
437 		h64 ^= k1;
438 		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
439 		p += 8;
440 	}
441 
442 	if (p + 4 <= b_end) {
443 		h64 ^= (uint64_t)(xxh_get_unaligned_le32(p)) * PRIME64_1;
444 		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
445 		p += 4;
446 	}
447 
448 	while (p < b_end) {
449 		h64 ^= (*p) * PRIME64_5;
450 		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
451 		p++;
452 	}
453 
454 	h64 ^= h64 >> 33;
455 	h64 *= PRIME64_2;
456 	h64 ^= h64 >> 29;
457 	h64 *= PRIME64_3;
458 	h64 ^= h64 >> 32;
459 
460 	return h64;
461 }
462