// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A crate for abstracting the underlying kernel hypervisor used in crosvm.

// Architecture-specific support modules.
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub mod aarch64;
pub mod caps;

// Hypervisor backends, gated on target platform and cargo feature flags.
#[cfg(all(
    unix,
    any(target_arch = "arm", target_arch = "aarch64"),
    feature = "gunyah"
))]
pub mod gunyah;
#[cfg(all(windows, feature = "haxm"))]
pub mod haxm;
#[cfg(any(target_os = "android", target_os = "linux"))]
pub mod kvm;
#[cfg(target_arch = "riscv64")]
pub mod riscv64;
#[cfg(all(windows, feature = "whpx"))]
pub mod whpx;
#[cfg(target_arch = "x86_64")]
pub mod x86_64;

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
#[cfg(all(unix, feature = "geniezone"))]
pub mod geniezone;

use base::AsRawDescriptor;
use base::Event;
use base::MappedRegion;
use base::Protection;
use base::Result;
use base::SafeDescriptor;
use serde::Deserialize;
use serde::Serialize;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

// Re-export the architecture-specific API for the current target so users can
// `use hypervisor::*` without naming the arch module.
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub use crate::aarch64::*;
pub use crate::caps::*;
#[cfg(target_arch = "riscv64")]
pub use crate::riscv64::*;
#[cfg(target_arch = "x86_64")]
pub use crate::x86_64::*;

/// An index in the list of guest-mapped memory regions.
pub type MemSlot = u32;

/// Range of GPA space. Starting from `guest_address` up to `size`.
pub struct MemRegion {
    /// Guest physical address at which the region starts.
    pub guest_address: GuestAddress,
    /// Length of the region in bytes.
    pub size: u64,
}

/// Signal to the hypervisor on kernels that support the KVM_CAP_USER_CONFIGURE_NONCOHERENT_DMA (or
/// equivalent) that during user memory region (memslot) configuration, a guest page's memtype
/// should be considered in SLAT effective memtype determination rather than implicitly respecting
/// only the host page's memtype.
///
/// This explicit control is needed for Virtio devices (e.g. gpu) that configure memslots for host
/// WB page mappings with guest WC page mappings. See b/316337317, b/360295883 for more detail.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum MemCacheType {
    /// Don't provide any explicit instruction to the hypervisor on how it should determine a
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means that the memslot is flagged with VMX_EPT_IPAT_BIT such that
    /// only the host memtype is respected.
    CacheCoherent,
    /// Explicitly instruct the hypervisor to respect the guest page's memtype when determining the
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means the memslot is NOT flagged with VMX_EPT_IPAT_BIT, and the
    /// effective memtype will generally decay to the weaker amongst the host/guest memtypes and
    /// the MTRR for the physical address.
    CacheNonCoherent,
}

/// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
/// requests they be freed. Use without the guest's knowledge is sure to break something.
pub enum BalloonEvent {
    /// Balloon event when the region is acquired from the guest. The guest cannot access this
    /// region any more. The guest memory can be reclaimed by the host OS. As per virtio-balloon
    /// spec, the given address and size are intended to be page-aligned.
    Inflate(MemRegion),
    /// Balloon event when the region is returned to the guest. VMM should reallocate memory and
    /// register it with the hypervisor for accesses by the guest.
    Deflate(MemRegion),
    /// Balloon event when the requested memory size is achieved. This can be achieved through
    /// either inflation or deflation. The `u64` will be the current size of the balloon in bytes.
    BalloonTargetReached(u64),
}

/// A trait for checking hypervisor capabilities.
pub trait Hypervisor: Send {
    /// Makes a shallow clone of this `Hypervisor`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `HypervisorCap` is available.
    fn check_capability(&self, cap: HypervisorCap) -> bool;
}

/// A wrapper for using a VM and getting/setting its state.
pub trait Vm: Send {
    /// Makes a shallow clone of this `Vm`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `VmCap` is available.
    ///
    /// This is distinct from the `Hypervisor` version of this method because some extensions depend
    /// on the particular `Vm` instance. This method is encouraged because it more accurately
    /// reflects the usable capabilities.
    fn check_capability(&self, c: VmCap) -> bool;

    /// Enable the VM capabilities.
    ///
    /// The default implementation returns an `Unsupported` error; backends that support enabling
    /// capabilities override it.
    fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
        Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
    }

    /// Get the guest physical address size in bits.
    fn get_guest_phys_addr_bits(&self) -> u8;

    /// Gets the guest-mapped memory for the Vm.
    fn get_memory(&self) -> &GuestMemory;

    /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the memory mapping is returned on success. The slot can be given
    /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
    /// ownership of `mem_region`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    ///
    /// `cache` can be used to set guest mem cache attribute if supported. Default is cache coherent
    /// memory. Noncoherent memory means this memory might not be coherent from all access points,
    /// e.g. this could be the case when host GPU doesn't set the memory to be coherent with CPU
    /// access. Setting this attribute would allow hypervisor to adjust guest mem control to ensure
    /// synchronized guest access in noncoherent DMA case.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
        cache: MemCacheType,
    ) -> Result<MemSlot>;

    /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
    /// `offset` from the start of the region. `offset` must be page aligned.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;

    /// Gives a MADV_PAGEOUT advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_pageout_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Gives a MADV_REMOVE advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_remove_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot.
    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;

    /// Creates an emulated device.
    fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`. Only works on VMs that support `VmCap::DirtyLog`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
    /// `data` slice represents the contents and length of the write, which is used to compare with
    /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
    /// delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;

    /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn get_pvclock(&self) -> Result<ClockState>;

    /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn set_pvclock(&self, state: &ClockState) -> Result<()>;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `slot` - Memory slot of the arena to map into.
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()>;

    /// Remove `size`-byte mapping starting at `offset` within the arena at `slot`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;

    /// Events from virtio-balloon that affect the state for guest memory and host memory.
    fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>;
}

/// Operation for Io and Mmio
#[derive(Debug)]
pub enum IoOperation<'a> {
    /// Data to be read from a device on the bus.
    ///
    /// The `handle_fn` should fill the entire slice with the read data.
    Read(&'a mut [u8]),

    /// Data to be written to a device on the bus.
    Write(&'a [u8]),
}

/// Parameters describing an MMIO or PIO from the guest.
#[derive(Debug)]
pub struct IoParams<'a> {
    /// Guest address (PIO port or MMIO physical address) being accessed.
    pub address: u64,
    /// Whether the access is a read or a write, along with its data buffer.
    pub operation: IoOperation<'a>,
}

/// Handle to a virtual CPU that may be used to request a VM exit from within a signal handler.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub struct VcpuSignalHandle {
    // Boxed backend-specific implementation; see `VcpuSignalHandleInner`.
    inner: Box<dyn VcpuSignalHandleInner>,
}

#[cfg(any(target_os = "android", target_os = "linux"))]
impl VcpuSignalHandle {
    /// Request an immediate exit for this VCPU.
    ///
    /// This function is safe to call from a signal handler.
    pub fn signal_immediate_exit(&self) {
        self.inner.signal_immediate_exit()
    }
}

/// Signal-safe mechanism for requesting an immediate VCPU exit.
///
/// Each hypervisor backend must implement this for its VCPU type.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub(crate) trait VcpuSignalHandleInner {
    /// Signal the associated VCPU to exit if it is currently running.
    ///
    /// # Safety
    ///
    /// The implementation of this function must be async signal safe.
    /// <https://man7.org/linux/man-pages/man7/signal-safety.7.html>
    fn signal_immediate_exit(&self);
}

/// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
/// state, which may be used to execute virtual machines.
pub trait Vcpu: downcast_rs::DowncastSync {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Casts this architecture specific trait object to the base trait object `Vcpu`.
    fn as_vcpu(&self) -> &dyn Vcpu;

    /// Runs the VCPU until it exits, returning the reason for the exit.
    fn run(&mut self) -> Result<VcpuExit>;

    /// Returns the vcpu id.
    fn id(&self) -> usize;

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool);

    /// Returns a handle that can be used to cause this VCPU to exit from `run()` from a signal
    /// handler.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn signal_handle(&self) -> VcpuSignalHandle;

    /// Handles an incoming MMIO request from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether a MMIO read or MMIO write was the reason for the MMIO
    /// exit, call `handle_fn` with the respective IoParams to perform the MMIO read or write, and
    /// set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()>;

    /// Handles an incoming PIO from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether an input or output was the reason for the Io exit,
    /// call `handle_fn` with the respective IoParams to perform the input/output operation, and set
    /// the return data in the vcpu so that the vcpu can resume running.
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()>;

    /// Signals to the hypervisor that this Vcpu is being paused by userspace.
    fn on_suspend(&self) -> Result<()>;

    /// Enables a hypervisor-specific extension on this Vcpu. `cap` is a constant defined by the
    /// hypervisor API (e.g., kvm.h). `args` are the arguments for enabling the feature, if any.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
}

downcast_rs::impl_downcast!(sync Vcpu);

/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
pub enum IoEventAddress {
    /// Port I/O address.
    Pio(u64),
    /// Memory-mapped I/O physical address.
    Mmio(u64),
}

/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Datamatch {
    /// Match any access regardless of its width or value.
    AnyLength,
    /// Match 1-byte accesses; `Some(v)` additionally requires the written value to equal `v`.
    U8(Option<u8>),
    /// Match 2-byte accesses; `Some(v)` additionally requires the written value to equal `v`.
    U16(Option<u16>),
    /// Match 4-byte accesses; `Some(v)` additionally requires the written value to equal `v`.
    U32(Option<u32>),
    /// Match 8-byte accesses; `Some(v)` additionally requires the written value to equal `v`.
    U64(Option<u64>),
}

/// The broad category of a VCPU shutdown error.
#[derive(Copy, Clone, Debug)]
pub enum VcpuShutdownErrorKind {
    /// The vcpu took a fault while delivering a fault.
    DoubleFault,
    /// The vcpu took a fault while delivering a double fault.
    TripleFault,
    /// Any other (hypervisor-specific) shutdown reason; see the raw error code.
    Other,
}

/// A Vcpu shutdown may signify an error, such as a double or triple fault,
/// or hypervisor specific reasons. This error covers all such cases.
#[derive(Copy, Clone, Debug)]
pub struct VcpuShutdownError {
    // Category of the shutdown; see `VcpuShutdownErrorKind`.
    kind: VcpuShutdownErrorKind,
    // Hypervisor-specific raw error code accompanying the shutdown.
    raw_error_code: u64,
}

impl VcpuShutdownError {
    /// Creates a new `VcpuShutdownError` from a `kind` and a hypervisor-specific
    /// `raw_error_code`.
    pub fn new(kind: VcpuShutdownErrorKind, raw_error_code: u64) -> VcpuShutdownError {
        Self {
            kind,
            raw_error_code,
        }
    }

    /// Returns the category of this shutdown error.
    pub fn kind(&self) -> VcpuShutdownErrorKind {
        self.kind
    }

    /// Returns the hypervisor-specific raw error code.
    pub fn get_raw_error_code(&self) -> u64 {
        self.raw_error_code
    }
}

// Note that when adding entries to the VcpuExit enum you may want to add corresponding entries in
// crosvm::stats::exit_to_index and crosvm::stats::exit_index_to_str if you don't want the new
// exit type to be categorized as "Unknown".

/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
#[derive(Debug, Clone, Copy)]
pub enum VcpuExit {
    /// An io instruction needs to be emulated.
    /// vcpu handle_io should be called to handle the io operation
    Io,
    /// A mmio instruction needs to be emulated.
    /// vcpu handle_mmio should be called to handle the mmio operation
    Mmio,
    /// The vcpu signaled an end-of-interrupt on the ioapic for the given vector.
    IoapicEoi {
        vector: u8,
    },
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    /// The vcpu shut down; `Err` carries the fault details if the shutdown was an error.
    Shutdown(std::result::Result<(), VcpuShutdownError>),
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    Intr,
    SetTpr,
    TprAccess,
    InternalError,
    SystemEventShutdown,
    SystemEventReset,
    SystemEventCrash,
    /// An invalid vcpu register was set while running.
    InvalidVpRegister,
    /// incorrect setup for vcpu requiring an unsupported feature
    UnsupportedFeature,
    /// vcpu run was user cancelled
    Canceled,
    /// an unrecoverable exception was encountered (different from Exception)
    UnrecoverableException,
    /// vcpu stopped due to an msr access.
    MsrAccess,
    /// vcpu stopped due to a cpuid request.
    #[cfg(target_arch = "x86_64")]
    Cpuid {
        entry: CpuIdEntry,
    },
    /// vcpu stopped due to calling rdtsc
    RdTsc,
    /// vcpu stopped for an apic smi trap
    ApicSmiTrap,
    /// vcpu stopped due to an apic trap
    ApicInitSipiTrap,
    /// vcpu stopped due to bus lock
    BusLock,
    /// Riscv supervisor call.
    Sbi {
        extension_id: u64,
        function_id: u64,
        args: [u64; 6],
    },
    /// Emulate CSR access from guest.
    RiscvCsr {
        csr_num: u64,
        new_value: u64,
        write_mask: u64,
        ret_value: u64,
    },
}

/// A device type to create with `Vm.create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV3,
    /// RiscV AIA in-kernel emulation
    #[cfg(target_arch = "riscv64")]
    RiscvAia,
}

/// The source chip of an `IrqSource`
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSourceChip {
    PicPrimary,
    PicSecondary,
    Ioapic,
    Gic,
    Aia,
}

/// A source of IRQs in an `IrqRoute`.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSource {
    /// A pin on an emulated interrupt controller chip.
    Irqchip { chip: IrqSourceChip, pin: u32 },
    /// A message-signaled interrupt described by its address/data pair.
    Msi { address: u64, data: u32 },
}

/// A single route for an IRQ.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IrqRoute {
    /// Global system interrupt number being routed.
    pub gsi: u32,
    /// Where the interrupt originates.
    pub source: IrqSource,
}

/// The state of the paravirtual clock.
#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
pub struct ClockState {
    /// Current pv clock timestamp, as seen by the guest
    pub clock: u64,
}

/// The MPState represents the state of a processor.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum MPState {
    /// the vcpu is currently running (x86/x86_64,arm/arm64)
    Runnable,
    /// the vcpu is an application processor (AP) which has not yet received an INIT signal
    /// (x86/x86_64)
    Uninitialized,
    /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
    InitReceived,
    /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
    Halted,
    /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
    SipiReceived,
    /// the vcpu is stopped (arm/arm64)
    Stopped,
}

/// Whether the VM should be run in protected mode or not.
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ProtectionType {
    /// The VM should be run in the unprotected mode, where the host has access to its memory.
    Unprotected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via the protected VM firmware, so that it can access its secrets.
    Protected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via a custom VM firmware, useful for debugging and testing.
    ProtectedWithCustomFirmware,
    /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
    /// will still be unable to access the VM memory, but it won't be given any secrets.
    ProtectedWithoutFirmware,
    /// The VM should be run in unprotected mode, but with the same memory layout as protected
    /// mode, protected VM firmware loaded, and simulating protected mode as much as possible.
    /// This is useful for debugging the protected VM firmware and other protected mode issues.
    UnprotectedWithFirmware,
}

impl ProtectionType {
    /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
    pub fn isolates_memory(&self) -> bool {
        matches!(
            self,
            Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
        )
    }

    /// Returns whether the VMM needs to load the pVM firmware.
    ///
    /// Note this covers only the variants where the VMM itself performs the load; in
    /// `Protected` mode the firmware is provided by other means, so `runs_firmware` differs.
    pub fn needs_firmware_loaded(&self) -> bool {
        matches!(
            self,
            Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
        )
    }

    /// Returns whether the VM runs a pVM firmware.
    pub fn runs_firmware(&self) -> bool {
        self.needs_firmware_loaded() || matches!(self, Self::Protected)
    }
}

#[derive(Clone, Copy)]
pub struct Config {
    #[cfg(target_arch = "aarch64")]
    /// enable the Memory Tagging Extension in the guest
    pub mte: bool,
    /// Protection mode the VM should run in; see `ProtectionType`.
    pub protection_type: ProtectionType,
}

impl Default for Config {
    /// Returns an unprotected configuration (MTE disabled on aarch64).
    fn default() -> Config {
        Config {
            #[cfg(target_arch = "aarch64")]
            mte: false,
            protection_type: ProtectionType::Unprotected,
        }
    }
}