1 // Copyright 2024, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 //! This file provides APIs for loading, verifying and booting Fuchsia/Zircon.
16
17 use crate::{gbl_print, gbl_println, image_buffer::ImageBuffer, GblOps, Result as GblResult};
18 pub use abr::{get_and_clear_one_shot_bootloader, get_boot_slot, Ops as AbrOps, SlotIndex};
19 use core::{fmt::Write, mem::MaybeUninit, num::NonZeroUsize};
20 use liberror::{Error, Result};
21 use libutils::aligned_subslice;
22 use safemath::SafeNum;
23 use zbi::{ZbiContainer, ZbiFlags, ZbiHeader, ZbiType};
24 use zerocopy::AsBytes;
25
26 mod vboot;
27 use vboot::{copy_items_after_kernel, zircon_verify_kernel};
28
29 /// Kernel load address alignment. Value taken from
30 /// https://fuchsia.googlesource.com/fuchsia/+/4f204d8a0243e84a86af4c527a8edcc1ace1615f/zircon/kernel/target/arm64/boot-shim/BUILD.gn#38
31 pub const ZIRCON_KERNEL_ALIGN: usize = 64 * 1024;
32
// Partition names that may hold A/B/R metadata. `durable_boot` is the modern
// Fuchsia name; `misc` is the legacy alias. Lookup order matters: the first
// existing partition wins (see `find_part_aliases`).
const DURABLE_BOOT_PARTITION: &str = "durable_boot";
const MISC_PARTITION: &str = "misc";
const ABR_PARTITION_ALIASES: &[&str] = &[DURABLE_BOOT_PARTITION, MISC_PARTITION];
36
37 /// Helper function to find partition given a list of possible aliases.
find_part_aliases<'a, 'b, 'c>( ops: &mut (impl GblOps<'a, 'c> + ?Sized), aliases: &'b [&str], ) -> Result<&'b str>38 fn find_part_aliases<'a, 'b, 'c>(
39 ops: &mut (impl GblOps<'a, 'c> + ?Sized),
40 aliases: &'b [&str],
41 ) -> Result<&'b str> {
42 Ok(*aliases
43 .iter()
44 .find(|v| matches!(ops.partition_size(v), Ok(Some(_))))
45 .ok_or(Error::NotFound)?)
46 }
47
/// `GblAbrOps` wraps an object implementing `GblOps` and implements the `abr::Ops` trait.
///
/// This adapter lets the `abr` crate's metadata routines read/write the A/B/R
/// partition (`durable_boot`/`misc`) through the platform-provided `GblOps` backend.
pub(crate) struct GblAbrOps<'a, T: ?Sized>(pub &'a mut T);
50
impl<'b, 'c, T: GblOps<'b, 'c> + ?Sized> AbrOps for GblAbrOps<'_, T> {
    // Reads A/B/R metadata from offset 0 of the first existing A/B/R partition alias.
    fn read_abr_metadata(&mut self, out: &mut [u8]) -> Result<()> {
        let part = find_part_aliases(self.0, &ABR_PARTITION_ALIASES)?;
        self.0.read_from_partition_sync(part, 0, out)
    }

    // Writes A/B/R metadata to offset 0 of the first existing A/B/R partition alias.
    fn write_abr_metadata(&mut self, data: &mut [u8]) -> Result<()> {
        let part = find_part_aliases(self.0, &ABR_PARTITION_ALIASES)?;
        self.0.write_to_partition_sync(part, 0, data)
    }

    // Exposes the platform console (if any) for `abr` diagnostics output.
    fn console(&mut self) -> Option<&mut dyn Write> {
        self.0.console_out()
    }
}
66
67 /// A helper for splitting the trailing unused portion of a ZBI container buffer.
68 ///
69 /// Returns a tuple of used subslice and unused subslice
zbi_split_unused_buffer(zbi: &mut [u8]) -> GblResult<(&mut [u8], &mut [u8])>70 fn zbi_split_unused_buffer(zbi: &mut [u8]) -> GblResult<(&mut [u8], &mut [u8])> {
71 Ok(zbi.split_at_mut(ZbiContainer::parse(&zbi[..])?.container_size()?))
72 }
73
74 /// Relocates a ZBI kernel to a different buffer.
75 ///
76 /// * `dest` must be aligned to `ZIRCON_KERNEL_ALIGN`.
77 /// * `dest` will be a ZBI container containing only the kernel item.
relocate_kernel(kernel: &[u8], dest: &mut [u8]) -> GblResult<()>78 pub fn relocate_kernel(kernel: &[u8], dest: &mut [u8]) -> GblResult<()> {
79 if (dest.as_ptr() as usize % ZIRCON_KERNEL_ALIGN) != 0 {
80 return Err(Error::InvalidAlignment.into());
81 }
82
83 let kernel = ZbiContainer::parse(&kernel[..])?;
84 let kernel_item = kernel.get_bootable_kernel_item()?;
85 let hdr = kernel_item.header;
86 // Creates a new ZBI kernel item at the destination.
87 let mut relocated = ZbiContainer::new(&mut dest[..])?;
88 let zbi_type = ZbiType::try_from(hdr.type_)?;
89 relocated.create_entry_with_payload(
90 zbi_type,
91 hdr.extra,
92 hdr.get_flags() & !ZbiFlags::CRC32,
93 kernel_item.payload.as_bytes(),
94 )?;
95 let (_, reserved_memory_size) = relocated.get_kernel_entry_and_reserved_memory_size()?;
96 let buf_len = u64::try_from(zbi_split_unused_buffer(dest)?.1.len()).map_err(Error::from)?;
97 match reserved_memory_size > buf_len {
98 true => Err(Error::BufferTooSmall(None).into()),
99 _ => Ok(()),
100 }
101 }
102
/// Relocate a ZBI kernel to the trailing unused buffer.
///
/// Returns the original kernel subslice and relocated kernel subslice.
pub fn relocate_to_tail(kernel: &mut [u8]) -> GblResult<(&mut [u8], &mut [u8])> {
    // Bytes needed at the destination (kernel image plus its reserved scratch memory).
    let reloc_size = ZbiContainer::parse(&kernel[..])?.get_buffer_size_for_kernel_relocation()?;
    let (original, relocated) = zbi_split_unused_buffer(kernel)?;
    // Bump the start of the unused region up to the kernel alignment boundary.
    let relocated = aligned_subslice(relocated, ZIRCON_KERNEL_ALIGN)?;
    // Pick the highest ZIRCON_KERNEL_ALIGN-aligned offset that still leaves
    // `reloc_size` bytes, so the relocated kernel sits at the buffer tail.
    // SafeNum propagates underflow (buffer too small) into the try_into error.
    let off = (SafeNum::from(relocated.len()) - reloc_size)
        .round_down(ZIRCON_KERNEL_ALIGN)
        .try_into()
        .map_err(Error::from)?;
    let relocated = &mut relocated[off..];
    relocate_kernel(original, relocated)?;
    // Re-split the full `kernel` slice at the relocated address so both returned
    // halves borrow from the same backing slice. The subtraction cannot underflow
    // because `relocated` points inside `kernel`.
    let reloc_addr = relocated.as_ptr() as usize;
    Ok(kernel.split_at_mut(reloc_addr.checked_sub(kernel.as_ptr() as usize).unwrap()))
}
119
120 /// Gets the list of aliases for slotted/slotless zircon partition name.
zircon_part_name_aliases(slot: Option<SlotIndex>) -> &'static [&'static str]121 fn zircon_part_name_aliases(slot: Option<SlotIndex>) -> &'static [&'static str] {
122 match slot {
123 Some(SlotIndex::A) => &["zircon_a", "zircon-a"][..],
124 Some(SlotIndex::B) => &["zircon_b", "zircon-b"][..],
125 Some(SlotIndex::R) => &["zircon_r", "zircon-r"][..],
126 _ => &["zircon"][..],
127 }
128 }
129
130 /// Gets the slotted/slotless standard zircon partition name.
zircon_part_name(slot: Option<SlotIndex>) -> &'static str131 pub fn zircon_part_name(slot: Option<SlotIndex>) -> &'static str {
132 zircon_part_name_aliases(slot)[0]
133 }
134
/// Gets the ZBI command line string for the current slot.
///
/// The item is consumed by Fuchsia's verified-boot stack to learn which slot
/// it was booted from (appended by `zircon_load_verify`).
fn slot_cmd_line(slot: SlotIndex) -> &'static str {
    match slot {
        SlotIndex::A => "zvb.current_slot=a",
        SlotIndex::B => "zvb.current_slot=b",
        SlotIndex::R => "zvb.current_slot=r",
    }
}
143
/// Loads and verifies a kernel of the given slot or slotless.
///
/// # Args
///
/// * `ops`: A reference to an object that implements `GblOps`.
/// * `slot`: None if slotless. Otherwise the target slot to boot.
/// * `slot_booted_successfully`: whether the slot is known-successful boot, and if so then this
///   function will update the anti-rollbacks.
///
/// On success returns a pair containing: 1. the slice of the ZBI container with device ZBI items
/// and 2. the slice of container containing the kernel.
pub fn zircon_load_verify<'a, 'd>(
    ops: &mut impl GblOps<'a, 'd>,
    slot: Option<SlotIndex>,
    slot_booted_successfully: bool,
) -> GblResult<(ImageBuffer<'d>, ImageBuffer<'d>)> {
    // TODO(b/379778252): use single `zbi_zircon` buffer for container to store both kernel and
    // arguments/items
    // NOTE(review): `unwrap()` panics if the platform cannot supply the buffer;
    // confirm whether propagating an error is preferable here.
    let mut zbi_items_img =
        ops.get_image_buffer("zbi_items", NonZeroUsize::new(64 * 1024 * 1024).unwrap()).unwrap();

    let init_len = zbi_items_img.tail().len();
    // TODO(b/379787423): it is possible to optimize this initialisation by treating
    // `zbi_items_img` same as kernel image (&[MaybeUninit]).
    MaybeUninit::fill(zbi_items_img.tail(), 0);

    // SAFETY: buffer was fully filled with 0 which is valid init value for u8
    unsafe {
        zbi_items_img.advance_used(init_len).unwrap();
    }
    // Empty container that will accumulate vbmeta, device and slot ZBI items.
    let mut zbi_items = ZbiContainer::new(zbi_items_img.used_mut())?;

    // Resolve the kernel partition, accepting legacy name spellings.
    let zircon_part = find_part_aliases(ops, zircon_part_name_aliases(slot))?;

    // Reads ZBI header to computes the total size of kernel.
    let mut zbi_header: ZbiHeader = Default::default();
    ops.read_from_partition_sync(zircon_part, 0, zbi_header.as_bytes_mut())?;
    // Total image size = container header size + payload length from the header.
    let image_length = (SafeNum::from(zbi_header.as_bytes_mut().len()) + zbi_header.length)
        .try_into()
        .map_err(Error::from)?;

    // Reads the entire kernel
    // TODO(b/379778252): as part of an attempt to use single container for kernel and arguments,
    // it would be necessary to read kernel header first to figure out how much space needed
    // (kernel size + scratch space)
    let mut kernel_img =
        ops.get_image_buffer("zbi_zircon", NonZeroUsize::new(128 * 1024 * 1024).unwrap()).unwrap();
    let kernel_uninit = kernel_img
        .as_mut()
        .get_mut(..image_length)
        .ok_or(Error::BufferTooSmall(Some(image_length)))?;
    ops.read_from_partition_sync(zircon_part, 0, kernel_uninit)?;
    // SAFETY: buffer was successfully filled from partition
    unsafe {
        kernel_img.advance_used(image_length).unwrap();
    }
    let load = kernel_img.used_mut();

    // Performs AVB verification.
    // TODO(b/379789161) verify that kernel buffer is big enough for the image and scratch buffer.
    zircon_verify_kernel(ops, slot, slot_booted_successfully, load, &mut zbi_items)?;
    // TODO(b/380409163) make sure moved items are before appended one to facilitate overriding.
    // It is not as efficient as moving kernel since ZBI items would contain file system and be
    // bigger than kernel.
    copy_items_after_kernel(load, &mut zbi_items)?;

    // Append additional ZBI items.
    match slot {
        Some(slot) => {
            // Appends current slot item.
            zbi_items.create_entry_with_payload(
                ZbiType::CmdLine,
                0,
                ZbiFlags::default(),
                slot_cmd_line(slot).as_bytes(),
            )?;
        }
        _ => {}
    }

    // Appends device specific ZBI items.
    ops.zircon_add_device_zbi_items(&mut zbi_items)?;

    // Appends staged bootloader file if present. Parse failures are ignored
    // (best-effort: a missing/invalid staged container is not fatal).
    match ops.get_zbi_bootloader_files_buffer_aligned().map(|v| ZbiContainer::parse(v)) {
        Some(Ok(v)) => zbi_items.extend(&v)?,
        _ => {}
    }

    Ok((zbi_items_img, kernel_img))
}
235
236 /// Loads and verifies the active slot kernel according to A/B/R.
237 ///
238 /// On disk A/B/R metadata will be updated.
239 ///
240 /// # Args
241 ///
242 /// * `ops`: A reference to an object that implements `GblOps`.
243 ///
244 /// Returns a tuple containing: 1. the slice of the ZBI container with device ZBI items, 2. the
245 /// slice of the relocated kernel, and 3. the selected slot index.
zircon_load_verify_abr<'a, 'd>( ops: &mut impl GblOps<'a, 'd>, ) -> GblResult<(ImageBuffer<'d>, ImageBuffer<'d>, SlotIndex)>246 pub fn zircon_load_verify_abr<'a, 'd>(
247 ops: &mut impl GblOps<'a, 'd>,
248 ) -> GblResult<(ImageBuffer<'d>, ImageBuffer<'d>, SlotIndex)> {
249 let (slot, successful) = get_boot_slot(&mut GblAbrOps(ops), true);
250 gbl_println!(ops, "Loading kernel from {}...", zircon_part_name(Some(slot)));
251 let (zbi_items_img, kernel_img) = zircon_load_verify(ops, Some(slot), successful)?;
252 gbl_println!(ops, "Successfully loaded slot: {}", zircon_part_name(Some(slot)));
253 Ok((zbi_items_img, kernel_img, slot))
254 }
255
256 /// Checks whether platform or A/B/R metadata instructs GBL to boot into fastboot mode.
257 ///
258 /// # Returns
259 ///
260 /// Returns true if fastboot mode is enabled, false if not.
zircon_check_enter_fastboot<'a, 'b>(ops: &mut impl GblOps<'a, 'b>) -> bool261 pub fn zircon_check_enter_fastboot<'a, 'b>(ops: &mut impl GblOps<'a, 'b>) -> bool {
262 match get_and_clear_one_shot_bootloader(&mut GblAbrOps(ops)) {
263 Ok(true) => {
264 gbl_println!(ops, "A/B/R one-shot-bootloader is set");
265 return true;
266 }
267 Err(e) => {
268 gbl_println!(ops, "Warning: error while checking A/B/R one-shot-bootloader ({:?})", e);
269 gbl_println!(ops, "Ignoring error and considered not set");
270 }
271 _ => {}
272 };
273
274 match ops.should_stop_in_fastboot() {
275 Ok(true) => {
276 gbl_println!(ops, "Platform instructs GBL to enter fastboot mode");
277 return true;
278 }
279 Err(e) => {
280 gbl_println!(ops, "Warning: error while checking platform fastboot trigger ({:?})", e);
281 gbl_println!(ops, "Ignoring error and considered not triggered");
282 }
283 _ => {}
284 };
285 false
286 }
287
288 #[cfg(test)]
289 mod test {
290 use super::*;
291 use crate::ops::{
292 test::{FakeGblOps, FakeGblOpsStorage, TestGblDisk},
293 CertPermanentAttributes,
294 };
295 use abr::{
296 mark_slot_active, mark_slot_unbootable, set_one_shot_bootloader, ABR_MAX_TRIES_REMAINING,
297 };
298 use avb_bindgen::{AVB_CERT_PIK_VERSION_LOCATION, AVB_CERT_PSK_VERSION_LOCATION};
299 use gbl_storage::as_uninit_mut;
300 use libutils::aligned_offset;
301 use std::{
302 collections::{BTreeSet, HashMap, LinkedList},
303 fs,
304 ops::{Deref, DerefMut},
305 path::Path,
306 };
307 use zbi::ZBI_ALIGNMENT_USIZE;
308 use zerocopy::FromBytes;
309
310 // The cert test keys were both generated with rollback version 42.
311 const TEST_CERT_PIK_VERSION: u64 = 42;
312 const TEST_CERT_PSK_VERSION: u64 = 42;
313
314 // The `reserve_memory_size` value in the test ZBI kernel.
315 // See `gen_zircon_test_images()` in libgbl/testdata/gen_test_data.py.
316 const TEST_KERNEL_RESERVED_MEMORY_SIZE: usize = 1024;
317
318 // The rollback index value and location in the generated test vbmetadata.
319 // See `gen_zircon_test_images()` in libgbl/testdata/gen_test_data.py.
320 const TEST_ROLLBACK_INDEX_LOCATION: usize = 1;
321 const TEST_ROLLBACK_INDEX_VALUE: u64 = 2;
322
323 pub(crate) const ZIRCON_A_ZBI_FILE: &str = "zircon_a.zbi";
324 pub(crate) const ZIRCON_B_ZBI_FILE: &str = "zircon_b.zbi";
325 pub(crate) const ZIRCON_R_ZBI_FILE: &str = "zircon_r.zbi";
326 pub(crate) const ZIRCON_SLOTLESS_ZBI_FILE: &str = "zircon_slotless.zbi";
327 pub(crate) const VBMETA_A_FILE: &str = "vbmeta_a.bin";
328 pub(crate) const VBMETA_B_FILE: &str = "vbmeta_b.bin";
329 pub(crate) const VBMETA_R_FILE: &str = "vbmeta_r.bin";
330 pub(crate) const VBMETA_SLOTLESS_FILE: &str = "vbmeta_slotless.bin";
331
332 /// Reads a data file under libgbl/testdata/
read_test_data(file: &str) -> Vec<u8>333 pub(crate) fn read_test_data(file: &str) -> Vec<u8> {
334 fs::read(Path::new(format!("external/gbl/libgbl/testdata/{}", file).as_str())).unwrap()
335 }
336
    /// Returns a default [FakeGblOpsStorage] with valid test images.
    ///
    /// Rather than the typical use case of partitions on a single GPT device, this structures data
    /// as separate raw single-partition devices. This is easier for tests since we don't need to
    /// generate a GPT, and should be functionally equivalent since our code looks for partitions
    /// on all devices.
    pub(crate) fn create_storage() -> FakeGblOpsStorage {
        let mut storage = FakeGblOpsStorage::default();
        // Slotted and slotless kernel partitions plus matching vbmeta partitions.
        storage.add_raw_device(c"zircon_a", read_test_data(ZIRCON_A_ZBI_FILE));
        storage.add_raw_device(c"zircon_b", read_test_data(ZIRCON_B_ZBI_FILE));
        storage.add_raw_device(c"zircon_r", read_test_data(ZIRCON_R_ZBI_FILE));
        storage.add_raw_device(c"zircon", read_test_data(ZIRCON_SLOTLESS_ZBI_FILE));
        storage.add_raw_device(c"vbmeta_a", read_test_data(VBMETA_A_FILE));
        storage.add_raw_device(c"vbmeta_b", read_test_data(VBMETA_B_FILE));
        storage.add_raw_device(c"vbmeta_r", read_test_data(VBMETA_R_FILE));
        storage.add_raw_device(c"vbmeta", read_test_data(VBMETA_SLOTLESS_FILE));
        // Zeroed A/B/R metadata partition (modern name).
        storage.add_raw_device(c"durable_boot", vec![0u8; 64 * 1024]);
        storage
    }
356
    /// Returns a default [FakeGblOpsStorage] with valid test images and using legacy partition
    /// names.
    pub(crate) fn create_storage_legacy_names() -> FakeGblOpsStorage {
        let mut storage = FakeGblOpsStorage::default();
        // Legacy dash-separated kernel partition names ("zircon-a" etc.).
        storage.add_raw_device(c"zircon-a", read_test_data(ZIRCON_A_ZBI_FILE));
        storage.add_raw_device(c"zircon-b", read_test_data(ZIRCON_B_ZBI_FILE));
        storage.add_raw_device(c"zircon-r", read_test_data(ZIRCON_R_ZBI_FILE));
        storage.add_raw_device(c"zircon", read_test_data(ZIRCON_SLOTLESS_ZBI_FILE));
        storage.add_raw_device(c"vbmeta_a", read_test_data(VBMETA_A_FILE));
        storage.add_raw_device(c"vbmeta_b", read_test_data(VBMETA_B_FILE));
        storage.add_raw_device(c"vbmeta_r", read_test_data(VBMETA_R_FILE));
        storage.add_raw_device(c"vbmeta", read_test_data(VBMETA_SLOTLESS_FILE));
        // Zeroed A/B/R metadata partition (legacy "misc" name).
        storage.add_raw_device(c"misc", vec![0u8; 64 * 1024]);
        storage
    }
372
create_gbl_ops<'a>(partitions: &'a [TestGblDisk]) -> FakeGblOps<'a, '_>373 pub(crate) fn create_gbl_ops<'a>(partitions: &'a [TestGblDisk]) -> FakeGblOps<'a, '_> {
374 let mut ops = FakeGblOps::new(&partitions);
375 ops.avb_ops.unlock_state = Ok(false);
376 ops.avb_ops.rollbacks = HashMap::from([
377 (TEST_ROLLBACK_INDEX_LOCATION, Ok(0)),
378 (AVB_CERT_PSK_VERSION_LOCATION.try_into().unwrap(), Ok(0)),
379 (AVB_CERT_PIK_VERSION_LOCATION.try_into().unwrap(), Ok(0)),
380 ]);
381 ops.avb_ops.use_cert = true;
382 ops.avb_ops.cert_permanent_attributes = Some(
383 CertPermanentAttributes::read_from(
384 &read_test_data("cert_permanent_attributes.bin")[..],
385 )
386 .unwrap(),
387 );
388 ops.avb_ops.cert_permanent_attributes_hash =
389 Some(read_test_data("cert_permanent_attributes.hash").try_into().unwrap());
390 ops
391 }
392
    // Helper object for allocating aligned buffer.
    pub(crate) struct AlignedBuffer {
        // Backing allocation, over-allocated by `alignment - 1` bytes so an aligned
        // window of `size` bytes always exists inside it.
        buffer: Vec<u8>,
        // Logical size of the window exposed via Deref/DerefMut.
        size: usize,
        // Required start-address alignment of the exposed window.
        alignment: usize,
    }
399
400 impl AlignedBuffer {
401 /// Allocates a buffer.
new(size: usize, alignment: usize) -> Self402 pub(crate) fn new(size: usize, alignment: usize) -> Self {
403 Self { buffer: vec![0u8; alignment + size - 1], size, alignment }
404 }
405
406 /// Allocates a buffer and initializes with data.
new_with_data(data: &[u8], alignment: usize) -> Self407 pub(crate) fn new_with_data(data: &[u8], alignment: usize) -> Self {
408 let mut res = Self::new(data.len(), alignment);
409 res.clone_from_slice(data);
410 res
411 }
412 }
413
    impl Deref for AlignedBuffer {
        type Target = [u8];

        // Returns the aligned `size`-byte window inside the over-allocated backing buffer.
        fn deref(&self) -> &Self::Target {
            let off = aligned_offset(&self.buffer, self.alignment).unwrap();
            &self.buffer[off..][..self.size]
        }
    }
422
    impl DerefMut for AlignedBuffer {
        // Mutable counterpart of `deref`: same aligned window, mutable access.
        fn deref_mut(&mut self) -> &mut Self::Target {
            let off = aligned_offset(&self.buffer, self.alignment).unwrap();
            &mut self.buffer[off..][..self.size]
        }
    }
429
    /// Normalizes a ZBI container by converting each ZBI item into raw bytes and storing them in
    /// an ordered set. The function is mainly used for comparing two ZBI containers have identical
    /// set of items, disregarding order.
    pub(crate) fn normalize_zbi(zbi: &[u8]) -> BTreeSet<Vec<u8>> {
        let zbi = ZbiContainer::parse(zbi).unwrap();
        BTreeSet::from_iter(zbi.iter().map(|v| {
            let mut hdr = *v.header;
            // CRC differences are irrelevant for item-set comparison, so zero the
            // field and clear the flag before serializing.
            hdr.crc32 = 0; // ignores crc32 field.
            hdr.flags &= !ZbiFlags::CRC32.bits();
            [hdr.as_bytes(), v.payload.as_bytes()].concat()
        }))
    }
442
    /// Helper to append a command line ZBI item to a ZBI container
    pub(crate) fn append_cmd_line(zbi: &mut [u8], cmd: &[u8]) {
        let mut container = ZbiContainer::parse(zbi).unwrap();
        container.create_entry_with_payload(ZbiType::CmdLine, 0, ZbiFlags::default(), cmd).unwrap();
    }
448
    /// Helper to append a bootloader file ZBI item to a ZBI container
    pub(crate) fn append_zbi_file(zbi: &mut [u8], payload: &[u8]) {
        let mut container = ZbiContainer::parse(zbi).unwrap();
        container
            .create_entry_with_payload(ZbiType::BootloaderFile, 0, ZbiFlags::default(), payload)
            .unwrap();
    }
456
    /// Helper for testing `zircon_load_verify`.
    ///
    /// Loads `slot` twice: first with `slot_booted_successfully = false` (rollback indexes
    /// must be untouched), then with `true` (rollback indexes must be updated to the values
    /// baked into the test images).
    fn test_load_verify(
        ops: &mut FakeGblOps,
        slot: Option<SlotIndex>,
        expected_zbi_items: &[u8],
        expected_kernel: &[u8],
    ) {
        let original_rb = ops.avb_ops.rollbacks.clone();
        // Loads and verifies with unsuccessful slot flag first.
        let (mut zbi_items, mut kernel) = zircon_load_verify(ops, slot, false).unwrap();
        // Verifies loaded ZBI kernel/items
        assert_eq!(normalize_zbi(expected_zbi_items), normalize_zbi(zbi_items.used_mut()));
        // Verifies kernel
        assert_eq!(normalize_zbi(expected_kernel), normalize_zbi(kernel.used_mut()));
        // Kernel is at aligned address
        assert_eq!(kernel.used_mut().as_ptr() as usize % ZIRCON_KERNEL_ALIGN, 0);

        // Verifies that the slot successful flag is passed correctly.
        // Unsuccessful slot, rollback not updated.
        assert_eq!(ops.avb_ops.rollbacks, original_rb);
        // Loads and verifies with successful slot flag.
        zircon_load_verify(ops, slot, true).unwrap();
        // Successful slot, rollback updated.
        assert_eq!(
            ops.avb_ops.rollbacks,
            [
                (TEST_ROLLBACK_INDEX_LOCATION, Ok(TEST_ROLLBACK_INDEX_VALUE)),
                (
                    usize::try_from(AVB_CERT_PSK_VERSION_LOCATION).unwrap(),
                    Ok(TEST_CERT_PSK_VERSION)
                ),
                (
                    usize::try_from(AVB_CERT_PIK_VERSION_LOCATION).unwrap(),
                    Ok(TEST_CERT_PIK_VERSION)
                )
            ]
            .into()
        );
    }
496
    // Helper to create local buffers and convert them to be used as ImageBuffers
    // This struct owns the buffers, and returns ImageBuffers maps that reference them.
    //
    // Tests should make sure to provide enough buffers for all `get_image_buffer()` calls.
    //
    // Maps image name ("zbi_zircon"/"zbi_items") to the list of buffers backing it.
    struct ImageBuffersPool(LinkedList<(String, Vec<AlignedBuffer>)>);

    impl ImageBuffersPool {
        /// Convenience entry point for configuring a pool via `ImageBuffersBuilder`.
        pub fn builder() -> ImageBuffersBuilder {
            ImageBuffersBuilder::new()
        }

        // number - number of expected get_image_buffer calls. Each call consumes buffers from the
        // list. If there are not enough it will start returning errors.
        //
        // size - size for the buffers
        fn new(number: usize, size: usize) -> Self {
            let mut zbi_items_buffer_vec = Vec::<AlignedBuffer>::new();
            let mut zbi_zircon_buffer_vec = Vec::<AlignedBuffer>::new();
            for _ in 0..number {
                // Kernel buffers need the stricter ZIRCON_KERNEL_ALIGN alignment.
                zbi_zircon_buffer_vec.push(AlignedBuffer::new(size, ZIRCON_KERNEL_ALIGN));
                zbi_items_buffer_vec.push(AlignedBuffer::new(size, ZBI_ALIGNMENT_USIZE));
            }

            Self(
                [
                    (String::from("zbi_zircon"), zbi_zircon_buffer_vec),
                    (String::from("zbi_items"), zbi_items_buffer_vec),
                ]
                .into(),
            )
        }

        /// Builds the name -> ImageBuffer list map consumed by `FakeGblOps`; the
        /// returned `ImageBuffer`s borrow this pool's backing buffers.
        pub fn get(&mut self) -> HashMap<String, LinkedList<ImageBuffer>> {
            self.0
                .iter_mut()
                .map(|(key, val_vec)| {
                    (
                        key.clone(),
                        val_vec
                            .iter_mut()
                            .map(|e| ImageBuffer::new(as_uninit_mut(e.as_mut())))
                            .collect(),
                    )
                })
                .collect()
        }
    }
545
    // Builder of `ImageBuffersPool`; see the `impl` block for defaults.
    struct ImageBuffersBuilder {
        // Number of buffers for each image name
        number: usize,
        // Size of the buffers
        size: usize,
    }
552
553 /// Tests should make sure to provide enough buffers for all `get_image_buffer()` calls.
554 /// Default number of calls is 1, if more expected use `builder().number(N).build()`
555 /// Default buffer sizes are 2KiB, if different size required use `builder().size(1MiB).build()`
556 impl ImageBuffersBuilder {
new() -> ImageBuffersBuilder557 pub fn new() -> ImageBuffersBuilder {
558 Self { number: 1, size: 2 * 1024 }
559 }
560
561 /// If more than 1 `get_image_buffer()` call expected `number(N)` should be used to create
562 /// big enough pool of buffers.
number(mut self, number: usize) -> ImageBuffersBuilder563 pub fn number(mut self, number: usize) -> ImageBuffersBuilder {
564 self.number = number;
565 self
566 }
567
568 /// To change size of buffers use `builder(). size(S).build()`.
size(mut self, size: usize) -> ImageBuffersBuilder569 pub fn size(mut self, size: usize) -> ImageBuffersBuilder {
570 self.size = size;
571 self
572 }
573
build(self) -> ImageBuffersPool574 pub fn build(self) -> ImageBuffersPool {
575 ImageBuffersPool::new(self.number, self.size)
576 }
577 }
578
    // Verifies slotless loading: the slotless image boots and no
    // `zvb.current_slot` item is appended.
    #[test]
    fn test_zircon_load_verify_slotless() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        // `test_load_verify` calls `zircon_load_verify` twice -> 2 buffers per image.
        let mut image_buffers_pool = ImageBuffersPool::builder().number(2).build();
        ops.image_buffers = image_buffers_pool.get();

        let zbi = &read_test_data(ZIRCON_SLOTLESS_ZBI_FILE);
        let expected_kernel = AlignedBuffer::new_with_data(zbi, ZBI_ALIGNMENT_USIZE);
        // Adds extra bytes for device ZBI items.
        let mut expected_zbi_items = AlignedBuffer::new(1024, ZBI_ALIGNMENT_USIZE);
        let _ = ZbiContainer::new(&mut expected_zbi_items[..]).unwrap();
        append_cmd_line(&mut expected_zbi_items, FakeGblOps::ADDED_ZBI_COMMANDLINE_CONTENTS);
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_0=val\0");
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_1=val\0");
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_1);
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_2);
        test_load_verify(&mut ops, None, &expected_zbi_items, &expected_kernel);
    }
598
    /// Helper for testing `zircon_load_verify` using A/B/R.
    ///
    /// `slot_item` is the expected `zvb.current_slot=<x>` command line for `slot`.
    fn test_load_verify_slotted_helper(
        ops: &mut FakeGblOps,
        slot: SlotIndex,
        zbi: &[u8],
        slot_item: &str,
    ) {
        let expected_kernel = AlignedBuffer::new_with_data(zbi, ZBI_ALIGNMENT_USIZE);
        // Adds extra bytes for device ZBI items.
        let mut expected_zbi_items = AlignedBuffer::new(1024, ZBI_ALIGNMENT_USIZE);
        let _ = ZbiContainer::new(&mut expected_zbi_items[..]).unwrap();
        append_cmd_line(&mut expected_zbi_items, FakeGblOps::ADDED_ZBI_COMMANDLINE_CONTENTS);
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_0=val\0");
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_1=val\0");
        append_cmd_line(&mut expected_zbi_items, slot_item.as_bytes());
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_1);
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_2);
        test_load_verify(ops, Some(slot), &expected_zbi_items, &expected_kernel);
    }
618
    // Slot A loads its own image and gets the slot-A command line item.
    #[test]
    fn test_load_verify_slot_a() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().number(2).build();
        ops.image_buffers = image_buffers_pool.get();

        let zircon_a_zbi = &read_test_data(ZIRCON_A_ZBI_FILE);
        test_load_verify_slotted_helper(&mut ops, SlotIndex::A, zircon_a_zbi, "zvb.current_slot=a");
    }
629
    // Slot B loads its own image and gets the slot-B command line item.
    #[test]
    fn test_load_verify_slot_b() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().number(2).build();
        ops.image_buffers = image_buffers_pool.get();

        let zircon_b_zbi = &read_test_data(ZIRCON_B_ZBI_FILE);
        test_load_verify_slotted_helper(&mut ops, SlotIndex::B, zircon_b_zbi, "zvb.current_slot=b");
    }
640
    // Slot R loads its own image and gets the slot-R command line item.
    #[test]
    fn test_load_verify_slot_r() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().number(2).build();
        ops.image_buffers = image_buffers_pool.get();

        let zircon_r_zbi = &read_test_data(ZIRCON_R_ZBI_FILE);
        test_load_verify_slotted_helper(&mut ops, SlotIndex::R, zircon_r_zbi, "zvb.current_slot=r");
    }
651
    // 1 KiB buffers cannot hold kernel plus the kernel's reserved scratch
    // memory (TEST_KERNEL_RESERVED_MEMORY_SIZE), so load must fail.
    #[test]
    fn test_not_enough_buffer_for_reserved_memory() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().size(1024).build();
        ops.image_buffers = image_buffers_pool.get();

        assert!(zircon_load_verify(&mut ops, Some(SlotIndex::A), true).is_err());
    }
661
    /// A helper for assembling a set of test needed data. These include:
    ///
    /// * The original ZBI kernel image on partition `part` in the given `FakeGblOps`.
    /// * A buffer for loading and verifying the kernel.
    /// * The expected ZBI item buffer, if successfully loaded as slot index `slot`.
    /// * The expected ZBI kernel buffer, if successfully loaded.
    fn load_verify_test_data(
        ops: &mut FakeGblOps,
        slot: SlotIndex,
        part: &str,
    ) -> (Vec<u8>, AlignedBuffer, AlignedBuffer, AlignedBuffer) {
        // Read the (possibly modified) ZBI from disk.
        let zbi = ops.copy_partition(part);
        // Load buffer: alignment slack + image + the kernel's reserved scratch memory.
        let sz = ZIRCON_KERNEL_ALIGN + zbi.len() + TEST_KERNEL_RESERVED_MEMORY_SIZE;
        let load_buffer = AlignedBuffer::new(sz, ZIRCON_KERNEL_ALIGN);
        let expected_kernel = AlignedBuffer::new_with_data(&zbi, ZBI_ALIGNMENT_USIZE);
        // Adds extra bytes for device ZBI items.
        let mut expected_zbi_items = AlignedBuffer::new(1024, ZBI_ALIGNMENT_USIZE);
        let _ = ZbiContainer::new(&mut expected_zbi_items[..]).unwrap();
        append_cmd_line(&mut expected_zbi_items, FakeGblOps::ADDED_ZBI_COMMANDLINE_CONTENTS);
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_0=val\0");
        append_cmd_line(&mut expected_zbi_items, b"vb_prop_1=val\0");
        append_cmd_line(
            &mut expected_zbi_items,
            format!("zvb.current_slot={}", char::from(slot)).as_bytes(),
        );
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_1);
        append_zbi_file(&mut expected_zbi_items, FakeGblOps::TEST_BOOTLOADER_FILE_2);
        (zbi, load_buffer, expected_zbi_items, expected_kernel)
    }
692
    // Calls `zircon_load_verify_abr` and checks that the specified slot is loaded.
    fn expect_load_verify_abr_ok(ops: &mut FakeGblOps, slot: SlotIndex, part: &str) {
        let (_, _load, expected_items, expected_kernel) = load_verify_test_data(ops, slot, part);
        let (mut zbi_items, mut kernel, active) = zircon_load_verify_abr(ops).unwrap();
        // Item-set comparison ignores ordering and CRCs (see `normalize_zbi`).
        assert_eq!(normalize_zbi(&expected_items), normalize_zbi(zbi_items.used_mut()));
        assert_eq!(normalize_zbi(&expected_kernel), normalize_zbi(kernel.used_mut()));
        assert_eq!(active, slot);
    }
701
    // Fresh A/B/R metadata defaults to booting slot A.
    #[test]
    fn test_load_verify_abr_slot_a() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().build();
        ops.image_buffers = image_buffers_pool.get();

        expect_load_verify_abr_ok(&mut ops, SlotIndex::A, "zircon_a");
    }
711
    // Explicitly activating slot B makes A/B/R select it.
    #[test]
    fn test_load_verify_abr_slot_b() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().build();
        ops.image_buffers = image_buffers_pool.get();

        mark_slot_active(&mut GblAbrOps(&mut ops), SlotIndex::B).unwrap();
        expect_load_verify_abr_ok(&mut ops, SlotIndex::B, "zircon_b");
    }
722
    // With both A and B marked unbootable, A/B/R falls back to recovery (R).
    #[test]
    fn test_load_verify_abr_slot_r() {
        let storage = create_storage();
        let mut ops = create_gbl_ops(&storage);
        let mut image_buffers_pool = ImageBuffersPool::builder().build();
        ops.image_buffers = image_buffers_pool.get();

        mark_slot_unbootable(&mut GblAbrOps(&mut ops), SlotIndex::A).unwrap();
        mark_slot_unbootable(&mut GblAbrOps(&mut ops), SlotIndex::B).unwrap();
        expect_load_verify_abr_ok(&mut ops, SlotIndex::R, "zircon_r");
    }
734
735 #[test]
test_load_verify_abr_exhaust_retries()736 fn test_load_verify_abr_exhaust_retries() {
737 let storage = create_storage();
738 let mut ops = create_gbl_ops(&storage);
739 let mut image_buffers_pool =
740 ImageBuffersPool::builder().number((1 + 2 * ABR_MAX_TRIES_REMAINING).into()).build();
741 ops.image_buffers = image_buffers_pool.get();
742
743 for _ in 0..ABR_MAX_TRIES_REMAINING {
744 expect_load_verify_abr_ok(&mut ops, SlotIndex::A, "zircon_a");
745 }
746 for _ in 0..ABR_MAX_TRIES_REMAINING {
747 expect_load_verify_abr_ok(&mut ops, SlotIndex::B, "zircon_b");
748 }
749 // Tests that load falls back to R eventually.
750 expect_load_verify_abr_ok(&mut ops, SlotIndex::R, "zircon_r");
751 }
752
753 /// Modifies data in the given partition.
corrupt_data(ops: &mut FakeGblOps, part_name: &str)754 pub(crate) fn corrupt_data(ops: &mut FakeGblOps, part_name: &str) {
755 let mut data = [0u8];
756 assert!(ops.read_from_partition_sync(part_name, 64, &mut data[..]).is_ok());
757 data[0] ^= 0x01;
758 assert!(ops.write_to_partition_sync(part_name, 64, &mut data[..]).is_ok());
759 }
760
761 #[test]
test_load_verify_abr_verify_failure_a_b()762 fn test_load_verify_abr_verify_failure_a_b() {
763 let storage = create_storage();
764 let mut ops = create_gbl_ops(&storage);
765 let mut image_buffers_pool =
766 ImageBuffersPool::builder().number((1 + 2 * ABR_MAX_TRIES_REMAINING).into()).build();
767 ops.image_buffers = image_buffers_pool.get();
768
769 corrupt_data(&mut ops, "zircon_a");
770 corrupt_data(&mut ops, "zircon_b");
771
772 let (_, _load, _, _) = load_verify_test_data(&mut ops, SlotIndex::A, "zircon_a");
773 for _ in 0..ABR_MAX_TRIES_REMAINING {
774 assert!(zircon_load_verify_abr(&mut ops).is_err());
775 }
776 let (_, _load, _, _) = load_verify_test_data(&mut ops, SlotIndex::B, "zircon_b");
777 for _ in 0..ABR_MAX_TRIES_REMAINING {
778 assert!(zircon_load_verify_abr(&mut ops).is_err());
779 }
780 // Tests that load falls back to R eventually.
781 expect_load_verify_abr_ok(&mut ops, SlotIndex::R, "zircon_r");
782 }
783
784 #[test]
test_load_verify_abr_verify_failure_unlocked()785 fn test_load_verify_abr_verify_failure_unlocked() {
786 let storage = create_storage();
787 let mut ops = create_gbl_ops(&storage);
788 let mut image_buffers_pool =
789 ImageBuffersPool::builder().number((1 + 2 * ABR_MAX_TRIES_REMAINING).into()).build();
790 ops.image_buffers = image_buffers_pool.get();
791
792 ops.avb_ops.unlock_state = Ok(true);
793 corrupt_data(&mut ops, "zircon_a");
794 corrupt_data(&mut ops, "zircon_b");
795
796 for _ in 0..ABR_MAX_TRIES_REMAINING {
797 expect_load_verify_abr_ok(&mut ops, SlotIndex::A, "zircon_a");
798 }
799 for _ in 0..ABR_MAX_TRIES_REMAINING {
800 expect_load_verify_abr_ok(&mut ops, SlotIndex::B, "zircon_b");
801 }
802 expect_load_verify_abr_ok(&mut ops, SlotIndex::R, "zircon_r");
803 }
804
805 #[test]
test_check_enter_fastboot_stop_in_fastboot()806 fn test_check_enter_fastboot_stop_in_fastboot() {
807 let storage = create_storage();
808 let mut ops = create_gbl_ops(&storage);
809
810 ops.stop_in_fastboot = Ok(false).into();
811 assert!(!zircon_check_enter_fastboot(&mut ops));
812
813 ops.stop_in_fastboot = Ok(true).into();
814 assert!(zircon_check_enter_fastboot(&mut ops));
815
816 ops.stop_in_fastboot = Err(Error::NotImplemented).into();
817 assert!(!zircon_check_enter_fastboot(&mut ops));
818 }
819
820 #[test]
test_check_enter_fastboot_abr()821 fn test_check_enter_fastboot_abr() {
822 let storage = create_storage();
823 let mut ops = create_gbl_ops(&storage);
824 set_one_shot_bootloader(&mut GblAbrOps(&mut ops), true).unwrap();
825 assert!(zircon_check_enter_fastboot(&mut ops));
826 // One-shot only.
827 assert!(!zircon_check_enter_fastboot(&mut ops));
828 }
829
830 #[test]
test_check_enter_fastboot_prioritize_abr()831 fn test_check_enter_fastboot_prioritize_abr() {
832 let storage = create_storage();
833 let mut ops = create_gbl_ops(&storage);
834 set_one_shot_bootloader(&mut GblAbrOps(&mut ops), true).unwrap();
835 ops.stop_in_fastboot = Ok(true).into();
836 assert!(zircon_check_enter_fastboot(&mut ops));
837 ops.stop_in_fastboot = Ok(false).into();
838 // A/B/R metadata should be prioritized in the previous check and thus one-shot-booloader
839 // flag should be cleared.
840 assert!(!zircon_check_enter_fastboot(&mut ops));
841 }
842 #[test]
test_load_verify_abr_legacy_naming()843 fn test_load_verify_abr_legacy_naming() {
844 let storage = create_storage_legacy_names();
845 let mut ops = create_gbl_ops(&storage);
846 let mut image_buffers_pool =
847 ImageBuffersPool::builder().number((1 + 2 * ABR_MAX_TRIES_REMAINING).into()).build();
848 ops.image_buffers = image_buffers_pool.get();
849
850 // Tests by exhausting all slots retries so it exercises all legacy name matching code
851 // paths.
852 for _ in 0..ABR_MAX_TRIES_REMAINING {
853 expect_load_verify_abr_ok(&mut ops, SlotIndex::A, "zircon-a");
854 }
855 for _ in 0..ABR_MAX_TRIES_REMAINING {
856 expect_load_verify_abr_ok(&mut ops, SlotIndex::B, "zircon-b");
857 }
858 // Tests that load falls back to R eventually.
859 expect_load_verify_abr_ok(&mut ops, SlotIndex::R, "zircon-r");
860 }
861
862 #[test]
test_zircon_load_verify_no_bootloader_file()863 fn test_zircon_load_verify_no_bootloader_file() {
864 let storage = create_storage();
865 let mut ops = create_gbl_ops(&storage);
866 let mut image_buffers_pool = ImageBuffersPool::builder().number(2).build();
867 ops.image_buffers = image_buffers_pool.get();
868 ops.get_zbi_bootloader_files_buffer().unwrap().fill(0);
869
870 let zbi = &read_test_data(ZIRCON_SLOTLESS_ZBI_FILE);
871 let expected_kernel = AlignedBuffer::new_with_data(zbi, ZBI_ALIGNMENT_USIZE);
872 // Adds extra bytes for device ZBI items.
873 let mut expected_zbi_items = AlignedBuffer::new(1024, ZBI_ALIGNMENT_USIZE);
874 let _ = ZbiContainer::new(&mut expected_zbi_items[..]).unwrap();
875 append_cmd_line(&mut expected_zbi_items, FakeGblOps::ADDED_ZBI_COMMANDLINE_CONTENTS);
876 append_cmd_line(&mut expected_zbi_items, b"vb_prop_0=val\0");
877 append_cmd_line(&mut expected_zbi_items, b"vb_prop_1=val\0");
878 test_load_verify(&mut ops, None, &expected_zbi_items, &expected_kernel);
879 }
880 }
881