/* SHA-256 implementation based on code by Olivier Gay
 * <[email protected]> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date:  04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "../avb_sha.h"
#include "avb_crypto_ops_impl.h"

#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))
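
/* SHA256_F1 and SHA256_F2 are the FIPS 180-2 round functions Sigma0 and
 * Sigma1; SHA256_F3 and SHA256_F4 are sigma0 and sigma1 from the message
 * schedule. CH ("choose") and MAJ ("majority") above are the round's
 * bitwise selection functions. */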

#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                           \
  {                                                \
    *((str) + 7) = (uint8_t)(x);                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  }

#define PACK32(str, x)                                                \
  {                                                                   \
    *(x) = ((uint32_t)*((str) + 3)) | ((uint32_t)*((str) + 2) << 8) | \
           ((uint32_t)*((str) + 1) << 16) |                           \
           ((uint32_t)*((str) + 0) << 24);                            \
  }
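
/* SHA-256 is specified in terms of big-endian words: PACK32 loads a 32-bit
 * word from four big-endian bytes, while UNPACK32/UNPACK64 store 32- and
 * 64-bit values back out in big-endian byte order (UNPACK64 serializes the
 * bit-length field in the final padding block). */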

/* Macros used for loop unrolling */

#define SHA256_SCR(i) \
  { w[i] = SHA256_F4(w[i - 2]) + w[i - 7] + SHA256_F3(w[i - 15]) + w[i - 16]; }

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)                               \
  {                                                                         \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha256_k[j] + \
         w[j];                                                              \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                       \
    wv[d] += t1;                                                            \
    wv[h] = t1 + t2;                                                        \
  }
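
/* SHA256_EXP performs one unrolled round. Instead of shifting the eight
 * working variables a..h down one slot per round, the unrolled path below
 * permutes the index arguments on each call, so only the two registers
 * actually updated (d and h) are written. */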

static const uint32_t sha256_h0[8] = {0x6a09e667,
                                      0xbb67ae85,
                                      0x3c6ef372,
                                      0xa54ff53a,
                                      0x510e527f,
                                      0x9b05688c,
                                      0x1f83d9ab,
                                      0x5be0cd19};

static const uint32_t sha256_k[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
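
/* sha256_h0 holds the initial hash values (the first 32 bits of the
 * fractional parts of the square roots of the first eight primes);
 * sha256_k holds the round constants (the first 32 bits of the fractional
 * parts of the cube roots of the first 64 primes), as specified in
 * FIPS 180-2. */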

/* SHA-256 implementation */
void avb_sha256_init(AvbSHA256Ctx* avb_ctx) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0];
  ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2];
  ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4];
  ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6];
  ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

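/* Core compression function: folds |block_nb| consecutive 64-byte blocks
 * of |message| into the running hash state ctx->h. Callers must pass whole
 * blocks; partial-block buffering is handled by avb_sha256_update(). */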
static void SHA256_transform(AvbSHA256ImplCtx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint32_t w[64]; /* Message schedule. */
  uint32_t wv[8]; /* Working variables a..h. */
  uint32_t t1, t2;
  const unsigned char* sub_block;
  size_t i;

#ifndef UNROLL_LOOPS
  size_t j;
#endif

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 6); /* i-th 64-byte block. */

#ifndef UNROLL_LOOPS
    /* Load the block as 16 big-endian words, then expand to 64. */
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    /* 64 compression rounds. */
    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] +
           w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    /* Add the compressed chunk into the running hash state. */
    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[0], &w[0]);
    PACK32(&sub_block[4], &w[1]);
    PACK32(&sub_block[8], &w[2]);
    PACK32(&sub_block[12], &w[3]);
    PACK32(&sub_block[16], &w[4]);
    PACK32(&sub_block[20], &w[5]);
    PACK32(&sub_block[24], &w[6]);
    PACK32(&sub_block[28], &w[7]);
    PACK32(&sub_block[32], &w[8]);
    PACK32(&sub_block[36], &w[9]);
    PACK32(&sub_block[40], &w[10]);
    PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]);
    PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]);
    PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16);
    SHA256_SCR(17);
    SHA256_SCR(18);
    SHA256_SCR(19);
    SHA256_SCR(20);
    SHA256_SCR(21);
    SHA256_SCR(22);
    SHA256_SCR(23);
    SHA256_SCR(24);
    SHA256_SCR(25);
    SHA256_SCR(26);
    SHA256_SCR(27);
    SHA256_SCR(28);
    SHA256_SCR(29);
    SHA256_SCR(30);
    SHA256_SCR(31);
    SHA256_SCR(32);
    SHA256_SCR(33);
    SHA256_SCR(34);
    SHA256_SCR(35);
    SHA256_SCR(36);
    SHA256_SCR(37);
    SHA256_SCR(38);
    SHA256_SCR(39);
    SHA256_SCR(40);
    SHA256_SCR(41);
    SHA256_SCR(42);
    SHA256_SCR(43);
    SHA256_SCR(44);
    SHA256_SCR(45);
    SHA256_SCR(46);
    SHA256_SCR(47);
    SHA256_SCR(48);
    SHA256_SCR(49);
    SHA256_SCR(50);
    SHA256_SCR(51);
    SHA256_SCR(52);
    SHA256_SCR(53);
    SHA256_SCR(54);
    SHA256_SCR(55);
    SHA256_SCR(56);
    SHA256_SCR(57);
    SHA256_SCR(58);
    SHA256_SCR(59);
    SHA256_SCR(60);
    SHA256_SCR(61);
    SHA256_SCR(62);
    SHA256_SCR(63);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 0);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 1);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 2);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 3);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 4);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 5);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 6);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 7);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 8);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 9);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 10);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 11);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 12);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 13);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 14);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 15);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 16);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 17);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 18);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 19);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 20);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 21);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 22);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 23);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 24);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 25);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 26);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 27);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 28);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 29);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 30);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 31);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 32);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 33);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 34);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 35);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 36);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 37);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 38);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 39);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 40);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 41);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 42);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 43);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 44);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 45);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 46);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 47);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 48);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 49);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 50);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 51);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 52);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 53);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 54);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 55);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 56);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 57);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 58);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 59);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 60);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 61);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 62);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 63);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

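/* Absorbs |len| bytes from |data| into the hash. Bytes that do not
 * complete a 64-byte block are buffered in ctx->block and consumed by a
 * later call or by avb_sha256_final(). */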
void avb_sha256_update(AvbSHA256Ctx* avb_ctx, const uint8_t* data, size_t len) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  /* First top up the partially filled block buffer. */
  tmp_len = AVB_SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  /* Not enough data for a full block yet: just buffer it. */
  if (ctx->len + len < AVB_SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  /* Hash the completed buffered block, then all whole blocks of input. */
  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  /* Buffer any remaining tail bytes for the next call. */
  rem_len = new_len % AVB_SHA256_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

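/* Appends the FIPS 180-2 padding (a 0x80 byte, zeros, then the total
 * message length in bits as a 64-bit big-endian value), hashes the final
 * block(s), and writes the 32-byte digest to avb_ctx->buf. Two final
 * blocks are needed when fewer than nine bytes are free in the current
 * block. */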
uint8_t* avb_sha256_final(AvbSHA256Ctx* avb_ctx) {
  AvbSHA256ImplCtx* ctx = (AvbSHA256ImplCtx*)avb_ctx->reserved;
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;
#ifndef UNROLL_LOOPS
  size_t i;
#endif

  block_nb =
      (1 + ((AVB_SHA256_BLOCK_SIZE - 9) < (ctx->len % AVB_SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3; /* Total message length in bits. */
  pm_len = block_nb << 6;

  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA256_transform(ctx, ctx->block, block_nb);

  /* Serialize the hash state as the big-endian digest. */
#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &avb_ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &avb_ctx->buf[0]);
  UNPACK32(ctx->h[1], &avb_ctx->buf[4]);
  UNPACK32(ctx->h[2], &avb_ctx->buf[8]);
  UNPACK32(ctx->h[3], &avb_ctx->buf[12]);
  UNPACK32(ctx->h[4], &avb_ctx->buf[16]);
  UNPACK32(ctx->h[5], &avb_ctx->buf[20]);
  UNPACK32(ctx->h[6], &avb_ctx->buf[24]);
  UNPACK32(ctx->h[7], &avb_ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return avb_ctx->buf;
}
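
/* Illustrative usage: a minimal sketch of how a caller might hash data
 * incrementally with this API. AVB_SHA256_IMPL_EXAMPLE is a hypothetical
 * guard (not part of libavb) so the sketch is compiled out by default;
 * the types and functions come from ../avb_sha.h above. */
#ifdef AVB_SHA256_IMPL_EXAMPLE
static void sha256_example(void) {
  AvbSHA256Ctx ctx;
  const uint8_t* digest;

  avb_sha256_init(&ctx);
  avb_sha256_update(&ctx, (const uint8_t*)"hello ", 6);
  avb_sha256_update(&ctx, (const uint8_t*)"world", 5);

  /* The returned pointer aliases storage inside |ctx|; the 32-byte digest
   * is only valid while |ctx| is alive and not re-initialized. */
  digest = avb_sha256_final(&ctx);
  (void)digest;
}
#endif /* AVB_SHA256_IMPL_EXAMPLE */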