// Copyright 2017-2023 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 
// Indicates that the element is not encoded; there is no *R* factor
// that needs to be canceled out. Uninhabited (no variants), so it exists
// only at the type level and has zero runtime cost.
#[derive(Copy, Clone)]
pub enum Unencoded {}
19 
// Indicates that the element is encoded; the value has one *R*
// factor that needs to be canceled out. Uninhabited type-level marker.
#[derive(Copy, Clone)]
pub enum R {}
24 
// Indicates the element is encoded twice; the value has two *R*
// factors that need to be canceled out. Uninhabited type-level marker.
#[derive(Copy, Clone)]
pub enum RR {}
29 
// Indicates the element is inversely encoded; the value has one
// 1/*R* factor that needs to be canceled out. Uninhabited type-level marker.
#[derive(Copy, Clone)]
pub enum RInverse {}
34 
// The encoding state of a value: how many *R* factors (one of -1, 0, 1, or 2,
// per the marker types above) the stored representation carries.
pub trait Encoding {}

impl Encoding for RR {}
impl Encoding for R {}
impl Encoding for Unencoded {}
impl Encoding for RInverse {}
41 
/// The encoding of the result of a reduction.
///
/// A (Montgomery) reduction removes one *R* factor from the input's
/// encoding, so each impl below maps an encoding to the one with one
/// fewer *R* factor; `Unencoded` goes negative, to `RInverse`.
pub trait ReductionEncoding {
    type Output: Encoding;
}

impl ReductionEncoding for RR {
    type Output = R;
}
impl ReductionEncoding for R {
    type Output = Unencoded;
}
impl ReductionEncoding for Unencoded {
    type Output = RInverse;
}
56 
/// The encoding of the result of a multiplication.
///
/// Implemented on pairs `(E1, E2)`: the encodings of the two factors. The
/// product's *R* factors add, and the reduction that follows the
/// multiplication removes one.
pub trait ProductEncoding {
    type Output: Encoding;
}

// (a) * (b·E): the *R* count is that of `E`, and the reduction removes one,
// i.e. the result has the encoding of a reduction of `E`.
impl<E: ReductionEncoding> ProductEncoding for (Unencoded, E) {
    type Output = E::Output;
}

// (a·R) * (b·E): the extra *R* cancels the reduction's 1/*R* exactly,
// leaving the encoding `E` unchanged.
impl<E: Encoding> ProductEncoding for (R, E) {
    type Output = E;
}

// (a/R) * (b·E): the 1/*R* adds to the reduction's 1/*R*, so the result has
// the encoding of two successive reductions of `E`; hence the extra bound.
impl<E: ReductionEncoding> ProductEncoding for (RInverse, E)
where
    E::Output: ReductionEncoding,
{
    type Output = <<E as ReductionEncoding>::Output as ReductionEncoding>::Output;
}
76 
// XXX: Rust doesn't allow overlapping impls,
// TODO (if/when Rust allows it):
// impl<E1, E2: ReductionEncoding> ProductEncoding for
//         (E1, E2) {
//     type Output = <(E2, E1) as ProductEncoding>::Output;
// }
//
// Until then, commutativity is spelled out case by case: multiplication is
// commutative, so `(RR, E)` gets the encoding already defined for `(E, RR)`.
impl ProductEncoding for (RR, Unencoded) {
    type Output = <(Unencoded, RR) as ProductEncoding>::Output;
}
impl ProductEncoding for (RR, RInverse) {
    type Output = <(RInverse, RR) as ProductEncoding>::Output;
}
89 
90 #[allow(unused_imports)]
91 use {
92     super::n0::N0,
93     crate::{bssl, c, limb::Limb},
94 };
95 
#[cfg(not(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "x86",
    target_arch = "x86_64"
)))]
prefixed_export! {
    // Fallback `bn_mul_mont` for targets that don't have an assembly-language
    // implementation: computes the double-width product `a * b` into a stack
    // buffer, then Montgomery-reduces it modulo `n` into `r`.
    //
    // Safety: `r`, `a`, `b`, and `n` must each point to `num_limbs` valid
    // limbs; `r` may alias `a` and/or `b`, but nothing may alias `n`.
    unsafe fn bn_mul_mont(
        r: *mut Limb,
        a: *const Limb,
        b: *const Limb,
        n: *const Limb,
        n0: &N0,
        num_limbs: c::size_t,
    ) {
        // The mutable pointer `r` may alias `a` and/or `b`, so the lifetimes of
        // any slices for `a` or `b` must not overlap with the lifetime of any
        // mutable for `r`.

        // Nothing aliases `n`
        let n = unsafe { core::slice::from_raw_parts(n, num_limbs) };

        // Scratch for the double-width product, sized for the largest
        // supported modulus; only the first `2 * num_limbs` limbs are used.
        let mut tmp = [0; 2 * super::BIGINT_MODULUS_MAX_LIMBS];
        let tmp = &mut tmp[..(2 * num_limbs)];
        {
            // `a` and `b` are borrowed only inside this scope, which ends
            // before the mutable borrow of `r` below (see aliasing note above).
            let a: &[Limb] = unsafe { core::slice::from_raw_parts(a, num_limbs) };
            let b: &[Limb] = unsafe { core::slice::from_raw_parts(b, num_limbs) };
            limbs_mul(tmp, a, b);
        }
        let r: &mut [Limb] = unsafe { core::slice::from_raw_parts_mut(r, num_limbs) };
        limbs_from_mont_in_place(r, tmp, n, n0);
    }
}
129 
// `bigint` needs this when the `alloc` feature is enabled. `bn_mul_mont` above needs this when
// we are using the platforms for which we don't have `bn_mul_mont` in assembly.
#[cfg(any(
    feature = "alloc",
    not(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86",
        target_arch = "x86_64"
    ))
))]
// Montgomery-reduces the double-width value in `tmp` modulo `m`, writing the
// single-width result to `r`. `tmp` is passed mutably to the C implementation
// and should be assumed clobbered. NOTE(review): `n0` is presumably the usual
// Montgomery `n0` constant for `m` — confirm against `super::n0`.
//
// Panics if the underlying C function reports failure (see `.unwrap()` below).
pub(super) fn limbs_from_mont_in_place(r: &mut [Limb], tmp: &mut [Limb], m: &[Limb], n0: &N0) {
    prefixed_extern! {
        fn bn_from_montgomery_in_place(
            r: *mut Limb,
            num_r: c::size_t,
            a: *mut Limb,
            num_a: c::size_t,
            n: *const Limb,
            num_n: c::size_t,
            n0: &N0,
        ) -> bssl::Result;
    }
    // The slices provide valid pointer/length pairs for the C function.
    Result::from(unsafe {
        bn_from_montgomery_in_place(
            r.as_mut_ptr(),
            r.len(),
            tmp.as_mut_ptr(),
            tmp.len(),
            m.as_ptr(),
            m.len(),
            n0,
        )
    })
    .unwrap()
}
166 
#[cfg(not(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "x86",
    target_arch = "x86_64"
)))]
// Schoolbook multiplication: `r = a * b`, where `r` is exactly twice as long
// as `a` and `b` (which must have equal lengths). Used only on targets
// without an assembly `bn_mul_mont`.
fn limbs_mul(r: &mut [Limb], a: &[Limb], b: &[Limb]) {
    debug_assert_eq!(r.len(), 2 * a.len());
    debug_assert_eq!(a.len(), b.len());
    let ab_len = a.len();

    // Zero the low half of the accumulator; each high-half limb is written
    // exactly once by the loop below, so it needs no initialization.
    r[..ab_len].fill(0);
    for (i, &b_limb) in b.iter().enumerate() {
        // Accumulate `a * b_limb` into `r[i..][..ab_len]`; the returned carry
        // limb becomes the next high-half limb.
        r[ab_len + i] = unsafe {
            // SAFETY: `r[i..][..ab_len]` and `a` are valid slices of exactly
            // `ab_len` limbs, and the borrow rules guarantee they don't alias,
            // as `limbs_mul_add_limb` requires.
            limbs_mul_add_limb(r[i..][..ab_len].as_mut_ptr(), a.as_ptr(), b_limb, ab_len)
        };
    }
}
190 
// Declared for `test` builds unconditionally (the tests below exercise it
// directly) and otherwise only on targets where the fallback `limbs_mul`
// above needs it.
#[cfg(any(
    test,
    not(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86"
    ))
))]
prefixed_extern! {
    // Multiply-accumulate one limb: adds `a * b` into the `num_limbs` limbs
    // at `r` and returns the carry-out limb. `r` must not alias `a`.
    #[must_use]
    fn limbs_mul_add_limb(r: *mut Limb, a: *const Limb, b: Limb, num_limbs: c::size_t) -> Limb;
}
205 
#[cfg(test)]
mod tests {
    use super::*;
    use crate::limb::Limb;

    #[test]
    // TODO: wasm
    fn test_mul_add_words() {
        const ZERO: Limb = 0;
        const MAX: Limb = ZERO.wrapping_sub(1);
        // Each case: (initial r, a, multiplier w, expected carry, expected r).
        static TEST_CASES: &[(&[Limb], &[Limb], Limb, Limb, &[Limb])] = &[
            (&[0], &[0], 0, 0, &[0]),
            (&[MAX], &[0], MAX, 0, &[MAX]),
            (&[0], &[MAX], MAX, MAX - 1, &[1]),
            (&[MAX], &[MAX], MAX, MAX, &[0]),
            (&[0, 0], &[MAX, MAX], MAX, MAX - 1, &[1, MAX]),
            (&[1, 0], &[MAX, MAX], MAX, MAX - 1, &[2, MAX]),
            (&[MAX, 0], &[MAX, MAX], MAX, MAX, &[0, 0]),
            (&[0, 1], &[MAX, MAX], MAX, MAX, &[1, 0]),
            (&[MAX, MAX], &[MAX, MAX], MAX, MAX, &[0, MAX]),
        ];

        for (case, (initial_r, a, w, want_carry, want_r)) in TEST_CASES.iter().enumerate() {
            extern crate std;
            let mut acc = std::vec::Vec::from(*initial_r);
            // Sanity-check the fixture: `r` and `a` must be the same length.
            assert_eq!(acc.len(), a.len());
            let got_carry =
                unsafe { limbs_mul_add_limb(acc.as_mut_ptr(), a.as_ptr(), *w, a.len()) };
            assert_eq!(&acc, want_r, "{}: {:x?} != {:x?}", case, &acc[..], want_r);
            assert_eq!(
                got_carry, *want_carry,
                "{}: {:x?} != {:x?}",
                case, got_carry, *want_carry
            );
        }
    }
}
243