xref: /aosp_15_r20/external/crosvm/hypervisor/src/gunyah/aarch64.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2023 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::collections::BTreeMap;
6 
7 use base::error;
8 use base::Error;
9 use base::Result;
10 use cros_fdt::Fdt;
11 use cros_fdt::FdtNode;
12 use libc::ENOENT;
13 use libc::ENOTSUP;
14 use vm_memory::GuestAddress;
15 use vm_memory::MemoryRegionPurpose;
16 
17 use super::GunyahVcpu;
18 use super::GunyahVm;
19 use crate::AArch64SysRegId;
20 use crate::Hypervisor;
21 use crate::PsciVersion;
22 use crate::VcpuAArch64;
23 use crate::VcpuRegAArch64;
24 use crate::VmAArch64;
25 use crate::PSCI_0_2;
26 
// First cell of an FDT "interrupts" property: interrupt kind. 0 selects an
// SPI (shared peripheral interrupt) — matches the GIC devicetree binding.
const GIC_FDT_IRQ_TYPE_SPI: u32 = 0;

// Third cell of an FDT "interrupts" property: trigger type flags. Values
// follow the Linux interrupt-controller devicetree binding (edge-rising = 1,
// level-high = 4).
const IRQ_TYPE_EDGE_RISING: u32 = 0x00000001;
const IRQ_TYPE_LEVEL_HIGH: u32 = 0x00000004;
31 
fdt_create_shm_device( parent: &mut FdtNode, index: u32, guest_addr: GuestAddress, ) -> cros_fdt::Result<()>32 fn fdt_create_shm_device(
33     parent: &mut FdtNode,
34     index: u32,
35     guest_addr: GuestAddress,
36 ) -> cros_fdt::Result<()> {
37     let shm_name = format!("shm-{:x}", index);
38     let shm_node = parent.subnode_mut(&shm_name)?;
39     shm_node.set_prop("vdevice-type", "shm")?;
40     shm_node.set_prop("peer-default", ())?;
41     shm_node.set_prop("dma_base", 0u64)?;
42     let mem_node = shm_node.subnode_mut("memory")?;
43     // We have to add the shm device for RM to accept the swiotlb memparcel.
44     // Memparcel is only used on android14-6.1. Once android14-6.1 is EOL
45     // we should be able to remove all the times we call fdt_create_shm_device()
46     mem_node.set_prop("optional", ())?;
47     mem_node.set_prop("label", index)?;
48     mem_node.set_prop("#address-cells", 2u32)?;
49     mem_node.set_prop("base", guest_addr.offset())
50 }
51 
impl VmAArch64 for GunyahVm {
    /// Returns the `Hypervisor` instance backing this VM.
    fn get_hypervisor(&self) -> &dyn Hypervisor {
        &self.gh
    }

    /// Registers the guest-physical range `[fw_addr, fw_addr + fw_max_size)`
    /// as the protected-VM firmware region with Gunyah.
    fn load_protected_vm_firmware(
        &mut self,
        fw_addr: GuestAddress,
        fw_max_size: u64,
    ) -> Result<()> {
        self.set_protected_vm_firmware_ipa(fw_addr, fw_max_size)
    }

    /// Creates vCPU `id` via the inherent `GunyahVm::create_vcpu` and boxes
    /// it behind the `VcpuAArch64` trait object.
    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuAArch64>> {
        Ok(Box::new(GunyahVm::create_vcpu(self, id)?))
    }

    /// Populates the "gunyah-vm-config" subtree of `fdt`: memory layout
    /// (payload base / firmware address), interrupt-controller phandle, vCPU
    /// affinity, and the vdevice list (one doorbell per IRQ route plus shm
    /// nodes for the remaining memory regions).
    ///
    /// Panics if `phandles` has no "intc" entry, and via `unreachable!` if
    /// more than one `ProtectedFirmwareRegion` is present.
    fn create_fdt(&self, fdt: &mut Fdt, phandles: &BTreeMap<&str, u32>) -> cros_fdt::Result<()> {
        let top_node = fdt.root_mut().subnode_mut("gunyah-vm-config")?;

        top_node.set_prop("image-name", "crosvm-vm")?;
        top_node.set_prop("os-type", "linux")?;

        let memory_node = top_node.subnode_mut("memory")?;
        memory_node.set_prop("#address-cells", 2u32)?;
        memory_node.set_prop("#size-cells", 2u32)?;

        // Describe where the payload and (optional) protected firmware live
        // in guest physical address space.
        let mut base_set = false;
        let mut firmware_set = false;
        for region in self.guest_mem.regions() {
            match region.options.purpose {
                MemoryRegionPurpose::GuestMemoryRegion => {
                    // Assume first GuestMemoryRegion contains the payload
                    if !base_set {
                        base_set = true;
                        memory_node.set_prop("base-address", region.guest_addr.offset())?;
                    }
                }
                MemoryRegionPurpose::ProtectedFirmwareRegion => {
                    if firmware_set {
                        // Should only have one protected firmware memory region.
                        error!("Multiple ProtectedFirmwareRegions unexpected.");
                        unreachable!()
                    }
                    firmware_set = true;
                    memory_node.set_prop("firmware-address", region.guest_addr.offset())?;
                }
                _ => {}
            }
        }

        let interrupts_node = top_node.subnode_mut("interrupts")?;
        // The caller must have registered the interrupt controller's phandle
        // under "intc"; this unwrap panics otherwise.
        interrupts_node.set_prop("config", *phandles.get("intc").unwrap())?;

        let vcpus_node = top_node.subnode_mut("vcpus")?;
        // NOTE(review): "proxy" affinity presumably means vCPUs run on
        // crosvm's vCPU threads rather than being scheduled by Gunyah —
        // confirm against the Gunyah VM-config documentation.
        vcpus_node.set_prop("affinity", "proxy")?;

        let vdev_node = top_node.subnode_mut("vdevices")?;
        vdev_node.set_prop("generate", "/hypervisor")?;

        // Emit one doorbell vdevice per registered IRQ route, wired up as a
        // GIC SPI with the trigger type taken from the route (level vs edge).
        for irq in self.routes.lock().iter() {
            let bell_name = format!("bell-{:x}", irq.irq);
            let bell_node = vdev_node.subnode_mut(&bell_name)?;
            bell_node.set_prop("vdevice-type", "doorbell")?;
            let path_name = format!("/hypervisor/bell-{:x}", irq.irq);
            bell_node.set_prop("generate", path_name)?;
            bell_node.set_prop("label", irq.irq)?;
            bell_node.set_prop("peer-default", ())?;
            bell_node.set_prop("source-can-clear", ())?;

            let interrupt_type = if irq.level {
                IRQ_TYPE_LEVEL_HIGH
            } else {
                IRQ_TYPE_EDGE_RISING
            };
            let interrupts = [GIC_FDT_IRQ_TYPE_SPI, irq.irq, interrupt_type];
            bell_node.set_prop("interrupts", &interrupts)?;
        }

        // Emit shm vdevice nodes for every region not already covered by the
        // "base-address" / "firmware-address" properties above.
        let mut base_set = false;
        for region in self.guest_mem.regions() {
            let create_shm_node = match region.options.purpose {
                MemoryRegionPurpose::GuestMemoryRegion => {
                    // Assume first GuestMemoryRegion contains the payload
                    // This memory region is described by the "base-address" property
                    // and doesn't get re-described as a separate shm node.
                    let ret = base_set;
                    base_set = true;
                    ret
                }
                // Described by the "firmware-address" property
                MemoryRegionPurpose::ProtectedFirmwareRegion => false,
                MemoryRegionPurpose::StaticSwiotlbRegion => true,
            };

            if create_shm_node {
                fdt_create_shm_device(
                    vdev_node,
                    region.index.try_into().unwrap(),
                    region.guest_addr,
                )?;
            }
        }

        Ok(())
    }

    /// Validates the payload/DTB layout, hands the DTB location to Gunyah,
    /// and starts the VM.
    ///
    /// Returns ENOENT if either address is not backed by guest memory;
    /// panics if DTB and payload are in different regions or the payload is
    /// not at the start of its region.
    fn init_arch(
        &self,
        payload_entry_address: GuestAddress,
        fdt_address: GuestAddress,
        fdt_size: usize,
    ) -> Result<()> {
        // Gunyah initializes the PC to be the payload entry (except for protected VMs)
        // and assumes that the image is loaded at the beginning of the "primary"
        // memory parcel (region). This parcel contains both DTB and kernel Image, so
        // make sure that DTB and payload are in the same memory region and that
        // payload is at the start of that region.

        let (dtb_mapping, _, dtb_obj_offset) = self
            .guest_mem
            .find_region(fdt_address)
            .map_err(|_| Error::new(ENOENT))?;
        let (payload_mapping, payload_offset, payload_obj_offset) = self
            .guest_mem
            .find_region(payload_entry_address)
            .map_err(|_| Error::new(ENOENT))?;

        // Same mapping pointer + same backing-object offset means the DTB and
        // payload share one memory region.
        if !std::ptr::eq(dtb_mapping, payload_mapping) || dtb_obj_offset != payload_obj_offset {
            panic!("DTB and payload are not part of same memory region.");
        }

        if payload_offset != 0 {
            panic!("Payload offset must be zero");
        }

        self.set_dtb_config(fdt_address, fdt_size)?;

        self.start()?;

        Ok(())
    }
}
195 
// Most of the `VcpuAArch64` surface is not available through Gunyah: methods
// that callers may legitimately probe return `ENOTSUP`, while methods this
// backend never expects to be called panic via `unimplemented!`.
impl VcpuAArch64 for GunyahVcpu {
    /// No per-vCPU feature setup is needed; the requested features are
    /// ignored and the call always succeeds.
    fn init(&self, _features: &[crate::VcpuFeature]) -> Result<()> {
        Ok(())
    }

    /// PMU setup is not supported; always returns `ENOTSUP`.
    fn init_pmu(&self, _irq: u64) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    /// Paravirtualized time is not supported by this backend.
    fn has_pvtime_support(&self) -> bool {
        false
    }

    /// PV time setup is not supported; always returns `ENOTSUP`.
    fn init_pvtime(&self, _pvtime_ipa: u64) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    /// Not implemented — panics if called.
    fn set_one_reg(&self, _reg_id: VcpuRegAArch64, _data: u64) -> Result<()> {
        unimplemented!()
    }

    /// Register reads are not supported; always returns `ENOTSUP`.
    fn get_one_reg(&self, _reg_id: VcpuRegAArch64) -> Result<u64> {
        Err(Error::new(ENOTSUP))
    }

    /// Not implemented — panics if called.
    fn set_vector_reg(&self, _reg_num: u8, _data: u128) -> Result<()> {
        unimplemented!()
    }

    /// Not implemented — panics if called.
    fn get_vector_reg(&self, _reg_num: u8) -> Result<u128> {
        unimplemented!()
    }

    /// Always reports PSCI 0.2.
    fn get_psci_version(&self) -> Result<PsciVersion> {
        Ok(PSCI_0_2)
    }

    /// Guest debugging (breakpoints / single-step) is not supported.
    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    /// Hardware breakpoint count is unavailable; always returns `ENOTSUP`.
    fn get_max_hw_bps(&self) -> Result<usize> {
        Err(Error::new(ENOTSUP))
    }

    /// System register access is not supported; always returns `ENOTSUP`.
    fn get_system_regs(&self) -> Result<BTreeMap<AArch64SysRegId, u64>> {
        Err(Error::new(ENOTSUP))
    }

    /// Cache info access is not supported; always returns `ENOTSUP`.
    fn get_cache_info(&self) -> Result<BTreeMap<u8, u64>> {
        Err(Error::new(ENOTSUP))
    }

    /// Cache info access is not supported; always returns `ENOTSUP`.
    fn set_cache_info(&self, _cache_info: BTreeMap<u8, u64>) -> Result<()> {
        Err(Error::new(ENOTSUP))
    }

    /// Snapshotting is not implemented — panics if called.
    fn hypervisor_specific_snapshot(&self) -> anyhow::Result<serde_json::Value> {
        unimplemented!()
    }

    /// Restore is not implemented — panics if called.
    fn hypervisor_specific_restore(&self, _data: serde_json::Value) -> anyhow::Result<()> {
        unimplemented!()
    }
}
261