// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Linux arm64 kernel loader.
//! <https://www.kernel.org/doc/Documentation/arm64/booting.txt>
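//!
//! Supports uncompressed `Image` files (`load_arm64_kernel`) and LZ4-compressed
//! images (`load_arm64_kernel_lz4`).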

use std::cmp::max;
use std::io;
use std::io::BufRead;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::mem::size_of_val;

use base::warn;
use base::FileGetLen;
use base::FileReadWriteAtVolatile;
use base::VolatileSlice;
use data_model::Le32;
use data_model::Le64;
use lz4_flex::frame::FrameDecoder as Lz4FrameDecoder;
use resources::AddressRange;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::Error;
use crate::LoadedKernel;
use crate::Result;

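/// Header of an arm64 kernel `Image` file, as described in the arm64 boot
/// documentation linked above. It occupies the first 64 bytes of the image.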
#[derive(Copy, Clone, AsBytes, FromZeroes, FromBytes)]
#[allow(unused)]
#[repr(C)]
struct Arm64ImageHeader {
    code0: Le32,
    code1: Le32,
    text_offset: Le64,
    image_size: Le64,
    flags: Le64,
    res2: Le64,
    res3: Le64,
    res4: Le64,
    magic: Le32,
    res5: Le32,
}

const ARM64_IMAGE_MAGIC: u32 = 0x644d5241; // "ARM\x64"

const ARM64_IMAGE_FLAG_BE_MASK: u64 = 0x1;

const ARM64_TEXT_OFFSET_DEFAULT: u64 = 0x80000;

impl Arm64ImageHeader {
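    /// Validates the header and computes the guest address to load the kernel
    /// at: `kernel_start` plus the header's `text_offset` (or the default
    /// offset of 0x80000 when `image_size` is zero).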
    fn parse_load_addr(&self, kernel_start: GuestAddress) -> Result<GuestAddress> {
        let magic: u32 = self.magic.into();
        if magic != ARM64_IMAGE_MAGIC {
            return Err(Error::InvalidMagicNumber);
        }

        let flags: u64 = self.flags.into();
        if flags & ARM64_IMAGE_FLAG_BE_MASK != 0 {
            return Err(Error::BigEndianOnLittle);
        }

        let mut text_offset: u64 = self.text_offset.into();
        let image_size: u64 = self.image_size.into();

        if image_size == 0 {
            warn!("arm64 Image header has an effective size of zero");
            // arm64/booting.txt:
            // "Where image_size is zero, text_offset can be assumed to be 0x80000."
            text_offset = ARM64_TEXT_OFFSET_DEFAULT;
        }

        // Load the image into guest memory at `text_offset` bytes past `kernel_start`.
        kernel_start
            .checked_add(text_offset)
            .ok_or(Error::InvalidKernelOffset)
    }
}

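/// Loads an uncompressed arm64 kernel `Image` file into guest memory.
///
/// The whole file is copied to `kernel_start` + `text_offset`, and the returned
/// address range covers the larger of the file size and the header's
/// `image_size`.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory to load the kernel into.
/// * `kernel_start` - The base guest address of the kernel region.
/// * `kernel_image` - The uncompressed kernel `Image` file.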
pub fn load_arm64_kernel<F>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    kernel_image: &mut F,
) -> Result<LoadedKernel>
where
    F: FileReadWriteAtVolatile + FileGetLen,
{
    let mut header = Arm64ImageHeader::new_zeroed();
    kernel_image
        .read_exact_at_volatile(VolatileSlice::new(header.as_bytes_mut()), 0)
        .map_err(|_| Error::ReadHeader)?;
    let load_addr = header.parse_load_addr(kernel_start)?;

    let file_size = kernel_image.get_len().map_err(|_| Error::SeekKernelEnd)?;
    let load_size = usize::try_from(file_size).map_err(|_| Error::InvalidKernelSize)?;
    let range_size = max(file_size, u64::from(header.image_size));

    let guest_slice = guest_mem
        .get_slice_at_addr(load_addr, load_size)
        .map_err(|_| Error::ReadKernelImage)?;
    kernel_image
        .read_exact_at_volatile(guest_slice, 0)
        .map_err(|_| Error::ReadKernelImage)?;

    Ok(LoadedKernel {
        size: file_size,
        address_range: AddressRange::from_start_and_size(load_addr.offset(), range_size)
            .ok_or(Error::InvalidKernelSize)?,
        entry: load_addr,
    })
}

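/// Loads an arm64 kernel from a `BufRead` source (such as a decompression
/// stream) into guest memory, copying it buffer by buffer so the reader never
/// needs to seek.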
fn load_arm64_kernel_from_reader<F: BufRead>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    mut kernel_image: F,
) -> Result<LoadedKernel> {
    let mut header = Arm64ImageHeader::new_zeroed();
    let header_size = u64::try_from(size_of_val(&header)).unwrap();

    // Read and parse the kernel header.
    kernel_image
        .read_exact(header.as_bytes_mut())
        .map_err(|_| Error::ReadHeader)?;
    let load_addr = header.parse_load_addr(kernel_start)?;

    // Write the parsed kernel header to memory. Avoid rewinding the reader back to the start.
    guest_mem
        .write_all_at_addr(header.as_bytes(), load_addr)
        .map_err(|_| Error::ReadKernelImage)?;

    // Continue reading from the source and copy the kernel image into GuestMemory.
    let mut current_addr = load_addr
        .checked_add(header_size)
        .ok_or(Error::InvalidKernelSize)?;
    loop {
        let buf = match kernel_image.fill_buf() {
            Ok([]) => break,
            Ok(buf) => buf,
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(_) => return Err(Error::ReadKernelImage),
        };

        guest_mem
            .write_all_at_addr(buf, current_addr)
            .map_err(|_| Error::ReadKernelImage)?;

        let consumed = buf.len();
        kernel_image.consume(consumed);

        let offset = u64::try_from(consumed).map_err(|_| Error::InvalidKernelSize)?;
        current_addr = current_addr
            .checked_add(offset)
            .ok_or(Error::InvalidKernelSize)?;
    }

    let file_size = current_addr.offset_from(load_addr);
    let range_size = max(file_size, u64::from(header.image_size));
    Ok(LoadedKernel {
        size: file_size,
        address_range: AddressRange::from_start_and_size(load_addr.offset(), range_size)
            .ok_or(Error::InvalidKernelSize)?,
        entry: load_addr,
    })
}

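/// Loads an LZ4-compressed arm64 kernel image into guest memory, decompressing
/// it on the fly.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory to load the kernel into.
/// * `kernel_start` - The base guest address of the kernel region.
/// * `kernel_image` - The LZ4-compressed kernel image file.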
pub fn load_arm64_kernel_lz4<F: Read + Seek>(
    guest_mem: &GuestMemory,
    kernel_start: GuestAddress,
    mut kernel_image: F,
) -> Result<LoadedKernel> {
    kernel_image
        .seek(SeekFrom::Start(0))
        .map_err(|_| Error::SeekKernelStart)?;
    load_arm64_kernel_from_reader(
        guest_mem,
        kernel_start,
        &mut Lz4FrameDecoder::new(kernel_image),
    )
}

#[cfg(test)]
mod test {
    use std::fs::File;
    use std::io::Seek;
    use std::io::SeekFrom;
    use std::io::Write;

    use tempfile::tempfile;
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use crate::load_arm64_kernel;
    use crate::load_arm64_kernel_lz4;
    use crate::Error;

    const MEM_SIZE: u64 = 0x200_0000;

    fn create_guest_mem() -> GuestMemory {
        GuestMemory::new(&[(GuestAddress(0x0), MEM_SIZE)]).unwrap()
    }

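    /// Writes a minimal arm64 `Image` header (text_offset = 0xE7_0000,
    /// image_size = 0xA) and extends the file to 0xDC_3808 bytes.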
    #[allow(clippy::unusual_byte_groupings)]
    fn write_valid_kernel() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&[0x00, 0xC0, 0x2E, 0x14]).unwrap(); // code0
        f.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); // code1
        f.write_all(&0x00000000_00E70000u64.to_le_bytes()).unwrap(); // text_offset
        f.write_all(&0x00000000_0000000Au64.to_le_bytes()).unwrap(); // image_size
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // flags
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res2
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res3
        f.write_all(&0x00000000_00000000u64.to_le_bytes()).unwrap(); // res4
        f.write_all(&0x644D5241u32.to_le_bytes()).unwrap(); // magic
        f.write_all(&0x00000000u32.to_le_bytes()).unwrap(); // res5

        f.set_len(0xDC3808).unwrap();
        f
    }

    fn mutate_file(mut f: &File, offset: u64, val: &[u8]) {
        f.seek(SeekFrom::Start(offset))
            .expect("failed to seek file");
        f.write_all(val)
            .expect("failed to write mutated value to file");
    }

    #[test]
    fn load_arm64_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();
        let kernel = load_arm64_kernel(&gm, kernel_addr, &mut f).unwrap();
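        // Expected load address: kernel_addr (0x20_0000) + text_offset (0xE7_0000) = 0x107_0000;
        // the loaded range covers the whole 0xDC_3808-byte file.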
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_image_size_zero() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();

        // Set image_size = 0 and validate the default text_offset is applied.
        mutate_file(&f, 16, &0u64.to_le_bytes());

        let kernel = load_arm64_kernel(&gm, kernel_addr, &mut f).unwrap();
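        // With image_size == 0 the default text_offset applies:
        // 0x20_0000 + 0x8_0000 = 0x28_0000.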
        assert_eq!(kernel.address_range.start, 0x28_0000);
        assert_eq!(kernel.address_range.end, 0x104_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x28_0000));
    }

    #[test]
    fn load_arm64_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel();

        // Mutate magic number so it doesn't match
        mutate_file(&f, 56, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel(&gm, kernel_addr, &mut f),
            Err(Error::InvalidMagicNumber)
        );
    }

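    /// Writes an LZ4 frame-format (magic 0x184D2204) file that decompresses to
    /// a kernel with the same header fields and total size as `write_valid_kernel`.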
    fn write_valid_kernel_lz4() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&0x184d2204u32.to_le_bytes()).unwrap(); // magic
        f.write_all(&[0x44, 0x70, 0x1d]).unwrap(); // flg, bd, hc

        // Compressed block #1.
        f.write_all(&0x00004065u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x51, 0x00, 0xc0, 0x2e, 0x14, 0x00, 0x01, 0x00, 0x11, 0xe7, 0x06, 0x00, 0x11, 0x0a,
            0x06, 0x00, 0x0f, 0x02, 0x00, 0x0f, 0x4f, 0x41, 0x52, 0x4d, 0x64, 0x26, 0x00, 0x0f,
            0x0f, 0x02, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16447]).unwrap();

        // Compressed block #2.
        f.write_all(&0x000050c9u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x4b, 0x40, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16448]).unwrap();

        // Compressed block #3.
        f.write_all(&0x00005027u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x4b, 0x40, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 16448]).unwrap();

        // Compressed block #4.
        f.write_all(&0x00005027u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x5f, 0x1c, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 7252]).unwrap();
        f.write_all(&[0x43, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00])
            .unwrap();

        // EndMark
        f.write_all(&0x00000000u32.to_le_bytes()).unwrap();

        // Checksum
        f.write_all(&0x22a9944cu32.to_le_bytes()).unwrap();

        f
    }

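    /// Writes an LZ4 legacy frame-format (magic 0x184C2102) file that
    /// decompresses to the same kernel header and total size as above.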
    fn write_valid_kernel_lz4_legacy() -> File {
        let mut f = tempfile().expect("failed to create tempfile");

        f.write_all(&0x184c2102u32.to_le_bytes()).unwrap(); // magic

        // Compressed block #1.
        f.write_all(&0x000080a6u32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x51, 0x00, 0xc0, 0x2e, 0x14, 0x00, 0x01, 0x00, 0x11, 0xe7, 0x06, 0x00, 0x11, 0x0a,
            0x06, 0x00, 0x0f, 0x02, 0x00, 0x0f, 0x4f, 0x41, 0x52, 0x4d, 0x64, 0x26, 0x00, 0x0f,
            0x0f, 0x02, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 32896]).unwrap();

        // Compressed block #2.
        f.write_all(&0x0000500au32.to_le_bytes()).unwrap();
        f.write_all(&[
            0x00, 0x00, 0x00, 0x9f, 0x5c, 0x00, 0x00, 0x1f, 0x00, 0x01, 0x00,
        ])
        .unwrap();
        f.write_all(&[0xff; 23700]).unwrap();
        f.write_all(&[0x83, 0x50, 0x00]).unwrap();

        // EndMark
        f.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap();

        f
    }

    #[test]
    fn load_arm64_lz4_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();
        let kernel = load_arm64_kernel_lz4(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_lz4_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();

        mutate_file(&f, 0, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_bad_block() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4();

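        // Corrupt the first compressed block; decompression fails while the
        // loader is reading the arm64 header.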
        mutate_file(&f, 7, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_legacy_valid() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();
        let kernel = load_arm64_kernel_lz4(&gm, kernel_addr, &mut f).unwrap();
        assert_eq!(kernel.address_range.start, 0x107_0000);
        assert_eq!(kernel.address_range.end, 0x1E3_3807);
        assert_eq!(kernel.size, 0xDC_3808);
        assert_eq!(kernel.entry, GuestAddress(0x107_0000));
    }

    #[test]
    fn load_arm64_lz4_legacy_bad_magic() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();

        mutate_file(&f, 0, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }

    #[test]
    fn load_arm64_lz4_legacy_bad_block() {
        let gm = create_guest_mem();
        let kernel_addr = GuestAddress(2 * 1024 * 1024);
        let mut f = write_valid_kernel_lz4_legacy();

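        // Corrupt the first compressed block of the legacy frame; decompression
        // fails while the loader is reading the arm64 header.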
        mutate_file(&f, 4, &[0xCC, 0xCC, 0xCC, 0xCC]);

        assert_eq!(
            load_arm64_kernel_lz4(&gm, kernel_addr, &mut f),
            Err(Error::ReadHeader)
        );
    }
}