/* Copyright 2024 The ChromiumOS Authors
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "2common.h"
#include "2rsa.h"

/**
 * Montgomery c[] = d[] - e[] if d[] > e[], c[] = d[] - e[] + n[] otherwise.
 * Uses "Subtract with Carry" and "Add with Carry" instructions to optimize BigNum
 * arithmetic. e[] will be overwritten with intermediate results.
 */
static void sub_mod(uint32_t *c, uint32_t *ed, const uint32_t *n, const uint32_t arrsize)
{
	uint32_t borrow, tmp1, tmp2, i;

	/* e[] = d[] - e[] */
	uint32_t size_clobber = arrsize;
	uint32_t *ed_clobber = ed;
	asm (
		"subs wzr, wzr, wzr\n\t"	/* init carry flag for subtraction */
		"1:\n\t"
		"ldp %w[e], %w[d], [%[ed_ptr]]\n\t"
		"sbcs %w[e], %w[d], %w[e]\n\t"
		"str %w[e], [%[ed_ptr]], #8\n\t"
		"sub %w[size], %w[size], #1\n\t"
		"cbnz %w[size], 1b\n\t"
		"cset %w[e], cc\n\t"	/* "borrow" = carry flag is 0 (cleared) */
		: [e] "=r" (borrow),
		  [d] "=r" (tmp1),
		  [size] "+r" (size_clobber),
		  [ed_ptr] "+r" (ed_clobber)
		:: "cc", "memory"
	);
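
	/*
	 * Plain-C sketch of the loop above (illustrative only; e and d are
	 * interleaved as ed[2 * i] and ed[2 * i + 1]):
	 *
	 *	uint32_t carry = 1;
	 *	for (i = 0; i < arrsize; i++) {
	 *		uint64_t t = (uint64_t)ed[2 * i + 1] + (uint32_t)~ed[2 * i] + carry;
	 *		ed[2 * i] = (uint32_t)t;
	 *		carry = (uint32_t)(t >> 32);
	 *	}
	 *	borrow = !carry;
	 */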

	if (borrow) {
		/* e[] = e[] + n[] */
		size_clobber = arrsize;
		ed_clobber = ed;
		asm volatile (
			"adds wzr, wzr, wzr\n\t"	/* init carry flag for addition */
			"1:\n\t"
			"ldr %w[e], [%[ed_ptr]]\n\t"
			"ldr %w[n], [%[n_ptr]], #4\n\t"
			"adcs %w[e], %w[e], %w[n]\n\t"
			"str %w[e], [%[ed_ptr]], #8\n\t"
			"sub %w[size], %w[size], #1\n\t"
			"cbnz %w[size], 1b\n\t"
			: [e] "=r" (tmp1),
			  [n] "=r" (tmp2),
			  [size] "+r" (size_clobber),
			  [ed_ptr] "+r" (ed_clobber),
			  [n_ptr] "+r" (n)
			:: "cc", "memory"
		);
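
		/*
		 * Plain-C sketch (illustrative only): a 32-bit add-with-carry
		 * chain over the interleaved e words.
		 *
		 *	uint32_t carry = 0;
		 *	for (i = 0; i < arrsize; i++) {
		 *		uint64_t t = (uint64_t)ed[2 * i] + n[i] + carry;
		 *		ed[2 * i] = (uint32_t)t;
		 *		carry = (uint32_t)(t >> 32);
		 *	}
		 */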
	}

	/* c[] = e[] */
	for (i = 0; i < arrsize; i++)
		c[i] = ed[i * 2];
}

/**
 * Montgomery c[] = a[] * b[] / R % mod (`ed` is a local scratch buffer)
 *
 * Algorithm according to https://eprint.iacr.org/2013/519.pdf and
 * https://chromium-review.googlesource.com/5055251.
 */
static void mont_mult(uint32_t *c,
		      const uint32_t *a,
		      const uint32_t *b,
		      const uint32_t *n,
		      uint32_t *ed,
		      const uint32_t mu,
		      const uint32_t arrsize)
{
	const uint32_t mub0 = mu * b[0];
	uint32_t i;

	memset(ed, 0, arrsize * sizeof(uint32_t) * 2);

	for (i = 0; i < arrsize; i++) {
		const uint32_t c0 = ed[1] - ed[0];
		const uint32_t muc0 = mu * c0;
		const uint32_t a_i = a[i];
		const uint32_t q = muc0 + mub0 * a_i;
		const uint32_t *n_clobber = n;
		const uint32_t *b_clobber = b;
		void *ed_clobber = ed;
		uint32_t size_clobber = arrsize - 1;
		asm volatile (
			/* v4.2d = always contains [0, 0] (for idempotent Add High Narrow) */
			"movi v4.2d, #0\n\t"
			/* v3.2s = "mul" = [q, a[i]] */
			"fmov s3, %w[q]\n\t"
			"mov v3.s[1], %w[a_i]\n\t"
			/* v1.2s = "bmod" = [n[0], b[0]] */
			"ldr s1, [%[n]], #4\n\t"
			"ld1 {v1.s}[1], [%[b]], #4\n\t"
			/* v2.2s = [e, d] */
			"ldr d2, [%[ed]]\n\t"
			"uxtl v2.2d, v2.2s\n\t"
			/* v2.2d = "p01" = ed + bmod * mul */
			"umlal v2.2d, v1.2s, v3.2s\n\t"
			/* v2.2s = "t01" = MSB-half(p01) */
			"addhn v2.2s, v2.2d, v4.2d\n\t"
			/* for (j = 1; j < arrsize; j++) */
109 "1:"
110 /* v0.2d = zero-extend(ed + t01) */
111 "ldr d0, [%[ed], #8]\n\t"
112 "uaddl v0.2d, v0.2s, v2.2s\n\t"
113 /* v1.2s = "bmod" = [n[j], b[j]] */
114 "ldr s1, [%[n]], #4\n\t"
115 "ld1 {v1.s}[1], [%[b]], #4\n\t"
116 /* v0.2d = "p01" = ed[j] + t01 + bmod * mul */
117 "umlal v0.2d, v1.2s, v3.2s\n\t"
118 /* v2.2s = "t01" = MSB-half(p01) */
119 "addhn v2.2s, v0.2d, v4.2d\n\t"
120 /* store ed[j - 1] = LSB-half(p01) */
121 "xtn v0.2s, v0.2d\n\t"
122 "str d0, [%[ed]], #8\n\t"
123 "subs %w[size], %w[size], #1\n\t"
124 "b.hi 1b\n\t"
125 /* store ed[arrsize - 1] = final t01 */
126 "str d2, [%[ed]]\n\t"
127 : [ed] "+r" (ed_clobber),
128 [n] "+r" (n_clobber),
129 [b] "+r" (b_clobber),
130 [size] "+r" (size_clobber)
131 : [q] "r" (q),
132 [a_i] "r" (a_i)
133 : "v0", "v1","v2", "v3", "v4", "cc", "memory"
134 );
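
		/*
		 * Plain-C sketch of this iteration (illustrative only): the
		 * e lane accumulates q * n[] and the d lane accumulates
		 * a[i] * b[], and both columns shift down by one 32-bit word
		 * (the two dropped low words are equal by construction of q),
		 * which spreads the division by R across the outer loop.
		 *
		 *	uint64_t pe = (uint64_t)ed[0] + (uint64_t)q * n[0];
		 *	uint64_t pd = (uint64_t)ed[1] + (uint64_t)a_i * b[0];
		 *	uint32_t j;
		 *	for (j = 1; j < arrsize; j++) {
		 *		pe = (uint64_t)ed[2 * j] + (pe >> 32) + (uint64_t)q * n[j];
		 *		pd = (uint64_t)ed[2 * j + 1] + (pd >> 32) + (uint64_t)a_i * b[j];
		 *		ed[2 * (j - 1)] = (uint32_t)pe;
		 *		ed[2 * (j - 1) + 1] = (uint32_t)pd;
		 *	}
		 *	ed[2 * (arrsize - 1)] = (uint32_t)(pe >> 32);
		 *	ed[2 * (arrsize - 1) + 1] = (uint32_t)(pd >> 32);
		 */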
	}

	sub_mod(c, ed, n, arrsize);
}

static void swap_bignumber_endianness(const void *in, void *out, size_t size_bytes)
{
	const void *in_end = in + size_bytes;

	/* REV64 can only swap within each 8-byte half of the 16-byte register, so use a
	   transposed STP to do the final swap of the two halves afterwards. */
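	/* Net effect (plain-C sketch, illustrative only): a full byte reversal
	 * of the buffer, done 16 bytes per iteration while walking `in`
	 * backwards from its end:
	 *
	 *	for (size_t i = 0; i < size_bytes; i++)
	 *		((uint8_t *)out)[i] = ((const uint8_t *)in)[size_bytes - 1 - i];
	 */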
	asm volatile (
		"1:\n\t"
		"ldr q0, [%[in], #-16]!\n\t"
		"rev64 v0.16b, v0.16b\n\t"
		"mov d1, v0.d[1]\n\t"
		"stp d1, d0, [%[out]], #16\n\t"
		"subs %[size], %[size], #16\n\t"
		"b.hi 1b\n\t"
		: [in] "+r" (in_end),
		  [out] "+r" (out),
		  [size] "+r" (size_bytes)
		:: "v0", "v1", "cc", "memory"
	);
}

vb2_error_t vb2ex_hwcrypto_modexp(const struct vb2_public_key *key,
				  uint8_t *inout, void *workbuf,
				  size_t workbuf_size, int exp)
{
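	/* mu = -n0inv = 1 / n[0] mod 2^32, the per-word inverse mont_mult() uses to derive q. */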
	const uint32_t mu = -key->n0inv;
	const uint32_t *n = key->n;
	const uint32_t arrsize = key->arrsize;
	uint32_t *a = workbuf;
	uint32_t *aR = (void *)inout;		/* Re-use location. */
	uint32_t *aaR = a + arrsize;
	uint32_t *aaa = aaR;			/* Re-use location. */
	uint32_t *ed = aaR + arrsize;		/* 8-byte align guaranteed by VB2_WORKBUF_ALIGN */
	uint32_t i;

	if (exp != 65537 || arrsize % 16 != 0 ||
	    (void *)&ed[arrsize * 2] - workbuf > workbuf_size)
		return VB2_ERROR_EX_HWCRYPTO_UNSUPPORTED;

	/* Convert from big endian byte array to little endian word array. */
	swap_bignumber_endianness(inout, a, arrsize * sizeof(uint32_t));

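	/*
	 * exp == 65537 == 2^16 + 1: bring a into Montgomery form, square it 16
	 * times to get a^(2^16) * R mod M, then one final Montgomery multiply
	 * by plain a supplies the last factor of a and cancels the remaining R,
	 * leaving a^65537 mod M in aaa.
	 */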
	mont_mult(aR, a, key->rr, n, ed, mu, arrsize);		/* aR = a * RR / R mod M */
	for (i = 0; i < 16; i += 2) {
		mont_mult(aaR, aR, aR, n, ed, mu, arrsize);	/* aaR = aR * aR / R mod M */
		mont_mult(aR, aaR, aaR, n, ed, mu, arrsize);	/* aR = aaR * aaR / R mod M */
	}
	mont_mult(aaa, aR, a, n, ed, mu, arrsize);		/* aaa = aR * a / R mod M */

	/* Convert back to big endian byte array. */
	swap_bignumber_endianness(aaa, inout, arrsize * sizeof(uint32_t));

	return VB2_SUCCESS;
}