// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Documentation for booting on AArch64 is at:
//!
//!   https://www.kernel.org/doc/html/v5.11/arm64/booting.html

use core::arch::asm;
use zbi::ZbiContainer;

/// ARM exception levels.
#[allow(missing_docs)]
#[derive(Debug, PartialEq)]
pub enum ExceptionLevel {
    EL0,
    EL1,
    EL2,
    EL3,
}

/// Gets the current EL.
pub fn current_el() -> ExceptionLevel {
    let mut el: u64;
    // SAFETY: The assembly code only reads the current exception level.
    unsafe {
        asm!(
            "mrs {el}, CurrentEL",
            el = out(reg) el,
        );
    }
    // CurrentEL stores the exception level in bits [3:2].
    el = (el >> 2) & 3;
    match el {
        0 => ExceptionLevel::EL0,
        1 => ExceptionLevel::EL1,
        2 => ExceptionLevel::EL2,
        3 => ExceptionLevel::EL3,
        v => panic!("Unknown EL {v}"),
    }
}

extern "C" {
    /// Cleans and invalidates the data cache by address range. The function comes from the ATF
    /// library.
    fn flush_dcache_range(addr: usize, len: usize);
}

/// Flushes all data cache lines for the given buffer.
fn flush_dcache_buffer(buf: &[u8]) {
    // SAFETY: `flush_dcache_range` only performs cache maintenance on the given buffer range.
    unsafe { flush_dcache_range(buf.as_ptr() as usize, buf.len()) };
    // SAFETY: Assembly code for instruction synchronization.
    unsafe { asm!("isb") };
}
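
// A minimal sketch (not part of the original file) of what a pure-Rust clean-and-invalidate by
// virtual address could look like if the external ATF routine were unavailable. The function
// name, the barrier choice and `#[allow(dead_code)]` are illustrative assumptions.
#[allow(dead_code)]
unsafe fn flush_dcache_range_sketch(addr: usize, len: usize) {
    let mut ctr: u64 = 0;
    // SAFETY: Reading CTR_EL0 has no side effects.
    unsafe { asm!("mrs {0}, ctr_el0", out(reg) ctr) };
    // CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest data cache line size in 4-byte
    // words, so the line size in bytes is 4 << DminLine.
    let line = 4usize << ((ctr >> 16) & 0xf);
    let mut cur = addr & !(line - 1);
    while cur < addr.saturating_add(len) {
        // SAFETY: `dc civac` cleans and invalidates the cache line containing `cur` by VA.
        unsafe { asm!("dc civac, {0}", in(reg) cur) };
        cur += line;
    }
    // SAFETY: Data synchronization barrier to ensure completion of the cache maintenance above.
    unsafe { asm!("dsb sy") };
}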

/// Disables the cache and MMU and jumps to the given kernel address with arguments.
///
/// # Args
///
/// * `addr`: Address to jump to.
/// * `arg[0-3]`: Arguments for the target jump address.
///
/// # Safety
///
/// * Caller must ensure that `addr` contains valid execution code.
/// * Caller must ensure that the data cache has been flushed for all memory regions containing
///   data to be accessed by the destination code, including the execution code itself at address
///   `addr`.
unsafe fn jump_kernel(addr: usize, arg0: usize, arg1: usize, arg2: usize, arg3: usize) -> ! {
    // TODO(b/334962949): Disable other state such as interrupts, async aborts, branch prediction,
    // etc.

    // After disabling the MMU and cache, memory regions with unflushed cache are stale and cannot
    // be trusted, including stack memory. Therefore all needed data, including local variables,
    // must first be loaded into registers. `disable_cache_mmu_and_jump` only operates on
    // registers and does not access the stack or any other memory.
    //
    // SAFETY: By the safety requirement of this function, `addr` contains valid execution code.
    unsafe {
        asm!(
            "b disable_cache_mmu_and_jump",
            in("x0") arg0,
            in("x1") arg1,
            in("x2") arg2,
            in("x3") arg3,
            in("x4") addr,
        )
    };
    unreachable!();
}
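
// `disable_cache_mmu_and_jump` is implemented in assembly elsewhere in the build. Below is a
// minimal, hypothetical sketch (EL2-only, with a `_sketch` suffix so it does not collide with the
// real symbol) of the kind of sequence it is expected to perform: clear the MMU (M), data cache
// (C) and instruction cache (I) enable bits in SCTLR_EL2, synchronize, then branch to the entry
// point in x4 while leaving the arguments in x0-x3 untouched. It is illustrative only and does
// not handle the EL1 case.
core::arch::global_asm!(
    r#"
    .global disable_cache_mmu_and_jump_sketch
disable_cache_mmu_and_jump_sketch:
    mrs x5, sctlr_el2
    bic x5, x5, #0x1        // M: MMU enable
    bic x5, x5, #0x4        // C: data cache enable
    bic x5, x5, #0x1000     // I: instruction cache enable
    msr sctlr_el2, x5
    isb
    br  x4
    "#
);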

/// Boots a Linux kernel in mode EL2 or lower with the given FDT blob.
///
/// # Safety
///
/// Caller must ensure that `kernel` contains a valid Linux kernel.
pub unsafe fn jump_linux_el2_or_lower(kernel: &[u8], ramdisk: &[u8], fdt: &[u8]) -> ! {
    assert_ne!(current_el(), ExceptionLevel::EL3);
    // The following is sufficient for existing use cases such as Cuttlefish. However, additional
    // initialization steps listed at
    // https://www.kernel.org/doc/html/v5.11/arm64/booting.html may need to be performed
    // explicitly for other platforms.

    flush_dcache_buffer(kernel);
    flush_dcache_buffer(ramdisk);
    flush_dcache_buffer(fdt);
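    // Per the arm64 booting protocol referenced above, x0 carries the physical address of the
    // FDT blob and x1-x3 are reserved and must be zero, which is what the call below arranges.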
    // SAFETY:
    // * `kernel`, `ramdisk` and `fdt` have been flushed.
    // * By requirement of this function, `kernel` is a valid kernel entry point.
    unsafe { jump_kernel(kernel.as_ptr() as _, fdt.as_ptr() as _, 0, 0, 0) };
}

/// Boots a ZBI kernel in mode EL2 or lower with the given ZBI blob.
///
/// # Safety
///
/// Caller must ensure that `kernel` contains a valid Zircon kernel ZBI item.
pub unsafe fn jump_zircon_el2_or_lower(kernel: &[u8], zbi_item: &[u8]) -> ! {
    assert_ne!(current_el(), ExceptionLevel::EL3);
    let (entry, _) =
        ZbiContainer::parse(kernel).unwrap().get_kernel_entry_and_reserved_memory_size().unwrap();
    flush_dcache_buffer(kernel);
    flush_dcache_buffer(zbi_item);
    let addr = (kernel.as_ptr() as usize).checked_add(usize::try_from(entry).unwrap()).unwrap();
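    // `entry` is the kernel entry point offset from the start of the kernel ZBI, so the jump
    // target is the load address of `kernel` plus that offset. Per the Zircon arm64 boot
    // protocol, x0 carries the physical address of the data ZBI (`zbi_item`).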
    // SAFETY:
    // * `kernel` and `zbi_item` have been flushed.
    // * By requirement of this function, the computed `addr` is a valid kernel entry point.
    unsafe { jump_kernel(addr, zbi_item.as_ptr() as _, 0, 0, 0) };
}