// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub use aarch64::*;

mod cap;
pub use cap::KvmCap;

#[cfg(target_arch = "riscv64")]
mod riscv64;

#[cfg(target_arch = "x86_64")]
mod x86_64;

use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::convert::TryFrom;
use std::ffi::CString;
use std::fs::File;
use std::os::raw::c_ulong;
use std::os::raw::c_void;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::sync::Arc;

use base::errno_result;
use base::error;
use base::ioctl;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::pagesize;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::FromRawDescriptor;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use data_model::vec_with_array_field;
use kvm_sys::*;
use libc::open64;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::ENOSYS;
use libc::EOVERFLOW;
use libc::O_CLOEXEC;
use libc::O_RDWR;
#[cfg(target_arch = "riscv64")]
use riscv64::*;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

use crate::BalloonEvent;
use crate::ClockState;
use crate::Config;
use crate::Datamatch;
use crate::DeviceKind;
use crate::Hypervisor;
use crate::HypervisorCap;
use crate::IoEventAddress;
use crate::IoOperation;
use crate::IoParams;
use crate::IrqRoute;
use crate::IrqSource;
use crate::MPState;
use crate::MemCacheType;
use crate::MemSlot;
use crate::Vcpu;
use crate::VcpuExit;
use crate::VcpuSignalHandle;
use crate::VcpuSignalHandleInner;
use crate::Vm;
use crate::VmCap;

// Wrapper around KVM_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
// from guest physical to host user pages.
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    slot: MemSlot,
    read_only: bool,
    log_dirty_pages: bool,
    cache: MemCacheType,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
    if log_dirty_pages {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if cache == MemCacheType::CacheNonCoherent {
        flags |= KVM_MEM_NON_COHERENT_DMA;
    }
    let region = kvm_userspace_memory_region {
        slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(descriptor, KVM_SET_USER_MEMORY_REGION, &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

/// Helper function to determine the size in bytes of a dirty log bitmap for the given memory region
/// size.
///
/// # Arguments
///
/// * `size` - Number of bytes in the memory region being queried.
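///
/// # Example
///
/// A minimal sketch assuming a 4 KiB host page size: a 1 MiB region covers 256 pages, so the
/// dirty bitmap needs 256 bits, which round up to 32 bytes. The concrete result depends on
/// `pagesize()` on the running host, so this is illustrative only.
///
/// ```ignore
/// // Hypothetical check; holds only when pagesize() == 4096.
/// assert_eq!(dirty_log_bitmap_size(1 << 20), 32);
/// ```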
pub fn dirty_log_bitmap_size(size: usize) -> usize {
    let page_size = pagesize();
    (((size + page_size - 1) / page_size) + 7) / 8
}

pub struct Kvm {
    kvm: SafeDescriptor,
    vcpu_mmap_size: usize,
}

impl Kvm {
    pub fn new_with_path(device_path: &Path) -> Result<Kvm> {
        let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
        // SAFETY:
        // Open calls are safe because we give a nul-terminated string and verify the result.
        let ret = unsafe { open64(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let kvm = unsafe { SafeDescriptor::from_raw_descriptor(ret) };

        // SAFETY:
        // Safe because we know that the descriptor is valid and we verify the return result.
        let version = unsafe { ioctl(&kvm, KVM_GET_API_VERSION) };
        if version < 0 {
            return errno_result();
        }

        // Per the kernel KVM API documentation: "Applications should refuse to run if
        // KVM_GET_API_VERSION returns a value other than 12."
        if version as u32 != KVM_API_VERSION {
            error!(
                "KVM_GET_API_VERSION: expected {}, got {}",
                KVM_API_VERSION, version,
            );
            return Err(Error::new(ENOSYS));
        }

        // SAFETY:
        // Safe because we know that our file is a KVM fd and we verify the return result.
        let res = unsafe { ioctl(&kvm, KVM_GET_VCPU_MMAP_SIZE) };
        if res <= 0 {
            return errno_result();
        }
        let vcpu_mmap_size = res as usize;

        Ok(Kvm {
            kvm,
            vcpu_mmap_size,
        })
    }

    /// Opens `/dev/kvm` and returns a Kvm object on success.
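    ///
    /// ```ignore
    /// // Hypothetical usage sketch (not from this file): assumes the host exposes /dev/kvm
    /// // and that this module is reachable as `hypervisor::kvm` from the calling crate.
    /// let kvm = hypervisor::kvm::Kvm::new().expect("failed to open /dev/kvm");
    /// ```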
    pub fn new() -> Result<Kvm> {
        Kvm::new_with_path(Path::new("/dev/kvm"))
    }
}

impl AsRawDescriptor for Kvm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.kvm.as_raw_descriptor()
    }
}

impl Hypervisor for Kvm {
    fn try_clone(&self) -> Result<Self> {
        Ok(Kvm {
            kvm: self.kvm.try_clone()?,
            vcpu_mmap_size: self.vcpu_mmap_size,
        })
    }

    fn check_capability(&self, cap: HypervisorCap) -> bool {
        if let Ok(kvm_cap) = KvmCap::try_from(cap) {
            // SAFETY:
            // this ioctl is safe because we know this kvm descriptor is valid,
            // and we are copying over the kvm capability (u32) as a c_ulong value.
            unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION, kvm_cap as c_ulong) == 1 }
        } else {
            // this capability cannot be converted on this platform, so return false
            false
        }
    }
}

/// A wrapper around creating and using a KVM VM.
pub struct KvmVm {
    kvm: Kvm,
    vm: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, Box<dyn MappedRegion>>>>,
    /// A min-heap of MemSlot numbers that were used and then removed and can now be re-used.
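    /// (`BinaryHeap` is a max-heap, so the slots are wrapped in `Reverse` to pop the lowest
    /// free slot number first.)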
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    cap_kvmclock_ctrl: bool,
}

impl KvmVm {
    /// Constructs a new `KvmVm` using the given `Kvm` instance.
    pub fn new(kvm: &Kvm, guest_mem: GuestMemory, cfg: Config) -> Result<KvmVm> {
        // SAFETY:
        // Safe because we know kvm is a real kvm fd as this module is the only one that can make
        // Kvm objects.
        let ret = unsafe {
            ioctl_with_val(
                kvm,
                KVM_CREATE_VM,
                kvm.get_vm_type(cfg.protection_type)? as c_ulong,
            )
        };
        if ret < 0 {
            return errno_result();
        }
        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
        for region in guest_mem.regions() {
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    region.index as MemSlot,
                    false,
                    false,
                    MemCacheType::CacheCoherent,
                    region.guest_addr.offset(),
                    region.size as u64,
                    region.host_addr as *mut u8,
                )
            }?;
        }

        let mut vm = KvmVm {
            kvm: kvm.try_clone()?,
            vm: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            cap_kvmclock_ctrl: false,
        };
        vm.cap_kvmclock_ctrl = vm.check_raw_capability(KvmCap::KvmclockCtrl);
        vm.init_arch(&cfg)?;
        Ok(vm)
    }

    pub fn create_kvm_vcpu(&self, id: usize) -> Result<KvmVcpu> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_val(self, KVM_CREATE_VCPU, c_ulong::try_from(id).unwrap()) };
        if fd < 0 {
            return errno_result();
        }

        // SAFETY:
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { File::from_raw_descriptor(fd) };

        // The VCPU mapping is held by an `Arc` inside `KvmVcpu`, and it can also be cloned by
        // `signal_handle()` for use in `KvmVcpuSignalHandle`. The mapping will not be destroyed
        // until all references are dropped, so it is safe to reference `kvm_run` fields via the
        // `as_ptr()` function during either type's lifetime.
        let run_mmap = MemoryMappingBuilder::new(self.kvm.vcpu_mmap_size)
            .from_file(&vcpu)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;

        Ok(KvmVcpu {
            kvm: self.kvm.try_clone()?,
            vm: self.vm.try_clone()?,
            vcpu,
            id,
            cap_kvmclock_ctrl: self.cap_kvmclock_ctrl,
            run_mmap: Arc::new(run_mmap),
        })
    }

    /// Creates an in-kernel interrupt controller.
    ///
    /// See the documentation on the KVM_CREATE_IRQCHIP ioctl.
    pub fn create_irq_chip(&self) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
    pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
        let mut irq_level = kvm_irq_level::default();
        irq_level.__bindgen_anon_1.irq = irq;
        irq_level.level = active.into();

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE, &irq_level) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Registers an event that, when signalled, triggers the `gsi` irq. If `resample_evt` is not
    /// `None`, it will be triggered when the irqchip is resampled.
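    ///
    /// ```ignore
    /// // Hypothetical sketch: wire an Event to GSI 5 with no resample event, then tear it
    /// // down again. Assumes `vm` is a KvmVm and that base::Event::new() is available.
    /// let evt = base::Event::new().expect("failed to create event");
    /// vm.register_irqfd(5, &evt, None).expect("KVM_IRQFD assign failed");
    /// vm.unregister_irqfd(5, &evt).expect("KVM_IRQFD deassign failed");
    /// ```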
    pub fn register_irqfd(
        &self,
        gsi: u32,
        evt: &Event,
        resample_evt: Option<&Event>,
    ) -> Result<()> {
        let mut irqfd = kvm_irqfd {
            fd: evt.as_raw_descriptor() as u32,
            gsi,
            ..Default::default()
        };

        if let Some(r_evt) = resample_evt {
            irqfd.flags = KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = r_evt.as_raw_descriptor() as u32;
        }

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD, &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Unregisters an event that was previously registered with
    /// `register_irqfd`.
    ///
    /// The `evt` and `gsi` pair must be the same as the ones passed into
    /// `register_irqfd`.
    pub fn unregister_irqfd(&self, gsi: u32, evt: &Event) -> Result<()> {
        let irqfd = kvm_irqfd {
            fd: evt.as_raw_descriptor() as u32,
            gsi,
            flags: KVM_IRQFD_FLAG_DEASSIGN,
            ..Default::default()
        };
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD, &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the GSI routing table, replacing any table set with previous calls to
    /// `set_gsi_routing`.
    pub fn set_gsi_routing(&self, routes: &[IrqRoute]) -> Result<()> {
        let mut irq_routing =
            vec_with_array_field::<kvm_irq_routing, kvm_irq_routing_entry>(routes.len());
        irq_routing[0].nr = routes.len() as u32;

        // SAFETY:
        // Safe because we ensured there is enough space in irq_routing to hold the number of
        // route entries.
        let irq_routes = unsafe { irq_routing[0].entries.as_mut_slice(routes.len()) };
        for (route, irq_route) in routes.iter().zip(irq_routes.iter_mut()) {
            *irq_route = kvm_irq_routing_entry::from(route);
        }

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING, &irq_routing[0]) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn ioeventfd(
        &self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
        deassign: bool,
    ) -> Result<()> {
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u, 8),
                None => (false, 0, 8),
            },
        };
        let mut flags = 0;
        if deassign {
            flags |= 1 << kvm_ioeventfd_flag_nr_deassign;
        }
        if do_datamatch {
            flags |= 1 << kvm_ioeventfd_flag_nr_datamatch
        }
        if let IoEventAddress::Pio(_) = addr {
            flags |= 1 << kvm_ioeventfd_flag_nr_pio;
        }
        let ioeventfd = kvm_ioeventfd {
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: match addr {
                IoEventAddress::Pio(p) => p,
                IoEventAddress::Mmio(m) => m,
            },
            fd: evt.as_raw_descriptor(),
            flags,
            ..Default::default()
        };
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD, &ioeventfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Checks whether a particular KVM-specific capability is available for this VM.
    pub fn check_raw_capability(&self, capability: KvmCap) -> bool {
        // SAFETY:
        // Safe because we know that our file is a KVM fd, and if the cap is invalid KVM assumes
        // it's an unavailable extension and returns 0.
        let ret = unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION, capability as c_ulong) };
        match capability {
            #[cfg(target_arch = "x86_64")]
            KvmCap::BusLockDetect => {
                if ret > 0 {
                    ret as u32 & KVM_BUS_LOCK_DETECTION_EXIT == KVM_BUS_LOCK_DETECTION_EXIT
                } else {
                    false
                }
            }
            _ => ret == 1,
        }
    }

    // Currently only used on aarch64, but works on any architecture.
    #[allow(dead_code)]
    /// Enables a KVM-specific capability for this VM, with the given arguments.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(
        &self,
        capability: KvmCap,
        flags: u32,
        args: &[u64; 4],
    ) -> Result<()> {
        let kvm_cap = kvm_enable_cap {
            cap: capability as u32,
            args: *args,
            flags,
            ..Default::default()
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct, and because we assume the caller has allocated the args appropriately.
        let ret = ioctl_with_ref(self, KVM_ENABLE_CAP, &kvm_cap);
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()> {
        match self.guest_mem.remove_range(guest_address, size) {
            Ok(_) => Ok(()),
            Err(vm_memory::Error::MemoryAccess(_, MmapError::SystemCallFailed(e))) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_deflate(&mut self, _guest_address: GuestAddress, _size: u64) -> Result<()> {
        // No-op, when the guest attempts to access the pages again, Linux/KVM will provide them.
        Ok(())
    }
}

impl Vm for KvmVm {
    fn try_clone(&self) -> Result<Self> {
        Ok(KvmVm {
            kvm: self.kvm.try_clone()?,
            vm: self.vm.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            cap_kvmclock_ctrl: self.cap_kvmclock_ctrl,
        })
    }

    fn check_capability(&self, c: VmCap) -> bool {
        if let Some(val) = self.check_capability_arch(c) {
            return val;
        }
        match c {
            VmCap::DirtyLog => true,
            VmCap::PvClock => false,
            VmCap::Protected => self.check_raw_capability(KvmCap::ArmProtectedVm),
            VmCap::EarlyInitCpuid => false,
            #[cfg(target_arch = "x86_64")]
            VmCap::BusLockDetect => self.check_raw_capability(KvmCap::BusLockDetect),
            // When pKVM is the hypervisor, read-only memslots aren't supported, even for
            // non-protected VMs.
            VmCap::ReadOnlyMemoryRegion => !self.is_pkvm(),
            VmCap::MemNoncoherentDma => {
                cfg!(feature = "noncoherent-dma")
                    && self.check_raw_capability(KvmCap::MemNoncoherentDma)
            }
        }
    }

    fn enable_capability(&self, c: VmCap, _flags: u32) -> Result<bool> {
        match c {
            #[cfg(target_arch = "x86_64")]
            VmCap::BusLockDetect => {
                let args = [KVM_BUS_LOCK_DETECTION_EXIT as u64, 0, 0, 0];
                Ok(
                    // TODO(b/315998194): Add safety comment
                    #[allow(clippy::undocumented_unsafe_blocks)]
                    unsafe {
                        self.enable_raw_capability(KvmCap::BusLockDetect, _flags, &args) == Ok(())
                    },
                )
            }
            _ => Ok(false),
        }
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        self.kvm.get_guest_phys_addr_bits()
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
        cache: MemCacheType,
    ) -> Result<MemSlot> {
        let pgsz = pagesize() as u64;
        // KVM requires the user memory region size to be page-size aligned. It is safe to round
        // mem.size() up to the next page boundary because mmap already rounds the mapping size up
        // to page alignment if it is not aligned.
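        // For example (hypothetical numbers, assuming 4 KiB pages): a 6000-byte region rounds up
        // to 8192 bytes, while an already-aligned 8192-byte region is left unchanged.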
616*bb4ee6a4SAndroid Build Coastguard Worker         let size = (mem.size() as u64 + pgsz - 1) / pgsz * pgsz;
617*bb4ee6a4SAndroid Build Coastguard Worker         let end_addr = guest_addr
618*bb4ee6a4SAndroid Build Coastguard Worker             .checked_add(size)
619*bb4ee6a4SAndroid Build Coastguard Worker             .ok_or_else(|| Error::new(EOVERFLOW))?;
620*bb4ee6a4SAndroid Build Coastguard Worker         if self.guest_mem.range_overlap(guest_addr, end_addr) {
621*bb4ee6a4SAndroid Build Coastguard Worker             return Err(Error::new(ENOSPC));
622*bb4ee6a4SAndroid Build Coastguard Worker         }
623*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
624*bb4ee6a4SAndroid Build Coastguard Worker         let mut gaps = self.mem_slot_gaps.lock();
625*bb4ee6a4SAndroid Build Coastguard Worker         let slot = match gaps.pop() {
626*bb4ee6a4SAndroid Build Coastguard Worker             Some(gap) => gap.0,
627*bb4ee6a4SAndroid Build Coastguard Worker             None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
628*bb4ee6a4SAndroid Build Coastguard Worker         };
629*bb4ee6a4SAndroid Build Coastguard Worker 
630*bb4ee6a4SAndroid Build Coastguard Worker         let cache_type = if self.check_capability(VmCap::MemNoncoherentDma) {
631*bb4ee6a4SAndroid Build Coastguard Worker             cache
632*bb4ee6a4SAndroid Build Coastguard Worker         } else {
633*bb4ee6a4SAndroid Build Coastguard Worker             MemCacheType::CacheCoherent
634*bb4ee6a4SAndroid Build Coastguard Worker         };
635*bb4ee6a4SAndroid Build Coastguard Worker 
636*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
637*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we check that the given guest address is valid and has no overlaps. We also
638*bb4ee6a4SAndroid Build Coastguard Worker         // know that the pointer and size are correct because the MemoryMapping interface ensures
639*bb4ee6a4SAndroid Build Coastguard Worker         // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
640*bb4ee6a4SAndroid Build Coastguard Worker         // is removed.
641*bb4ee6a4SAndroid Build Coastguard Worker         let res = unsafe {
642*bb4ee6a4SAndroid Build Coastguard Worker             set_user_memory_region(
643*bb4ee6a4SAndroid Build Coastguard Worker                 &self.vm,
644*bb4ee6a4SAndroid Build Coastguard Worker                 slot,
645*bb4ee6a4SAndroid Build Coastguard Worker                 read_only,
646*bb4ee6a4SAndroid Build Coastguard Worker                 log_dirty_pages,
647*bb4ee6a4SAndroid Build Coastguard Worker                 cache_type,
648*bb4ee6a4SAndroid Build Coastguard Worker                 guest_addr.offset(),
649*bb4ee6a4SAndroid Build Coastguard Worker                 size,
650*bb4ee6a4SAndroid Build Coastguard Worker                 mem.as_ptr(),
651*bb4ee6a4SAndroid Build Coastguard Worker             )
652*bb4ee6a4SAndroid Build Coastguard Worker         };
653*bb4ee6a4SAndroid Build Coastguard Worker 
654*bb4ee6a4SAndroid Build Coastguard Worker         if let Err(e) = res {
655*bb4ee6a4SAndroid Build Coastguard Worker             gaps.push(Reverse(slot));
656*bb4ee6a4SAndroid Build Coastguard Worker             return Err(e);
657*bb4ee6a4SAndroid Build Coastguard Worker         }
658*bb4ee6a4SAndroid Build Coastguard Worker         regions.insert(slot, mem);
659*bb4ee6a4SAndroid Build Coastguard Worker         Ok(slot)
660*bb4ee6a4SAndroid Build Coastguard Worker     }
661*bb4ee6a4SAndroid Build Coastguard Worker 
msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>662*bb4ee6a4SAndroid Build Coastguard Worker     fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
663*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
664*bb4ee6a4SAndroid Build Coastguard Worker         let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;
665*bb4ee6a4SAndroid Build Coastguard Worker 
666*bb4ee6a4SAndroid Build Coastguard Worker         mem.msync(offset, size).map_err(|err| match err {
667*bb4ee6a4SAndroid Build Coastguard Worker             MmapError::InvalidAddress => Error::new(EFAULT),
668*bb4ee6a4SAndroid Build Coastguard Worker             MmapError::NotPageAligned => Error::new(EINVAL),
669*bb4ee6a4SAndroid Build Coastguard Worker             MmapError::SystemCallFailed(e) => e,
670*bb4ee6a4SAndroid Build Coastguard Worker             _ => Error::new(EIO),
671*bb4ee6a4SAndroid Build Coastguard Worker         })
672*bb4ee6a4SAndroid Build Coastguard Worker     }
673*bb4ee6a4SAndroid Build Coastguard Worker 
madvise_pageout_memory_region( &mut self, slot: MemSlot, offset: usize, size: usize, ) -> Result<()>674*bb4ee6a4SAndroid Build Coastguard Worker     fn madvise_pageout_memory_region(
675*bb4ee6a4SAndroid Build Coastguard Worker         &mut self,
676*bb4ee6a4SAndroid Build Coastguard Worker         slot: MemSlot,
677*bb4ee6a4SAndroid Build Coastguard Worker         offset: usize,
678*bb4ee6a4SAndroid Build Coastguard Worker         size: usize,
679*bb4ee6a4SAndroid Build Coastguard Worker     ) -> Result<()> {
680*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
681*bb4ee6a4SAndroid Build Coastguard Worker         let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;
682*bb4ee6a4SAndroid Build Coastguard Worker 
683*bb4ee6a4SAndroid Build Coastguard Worker         mem.madvise(offset, size, libc::MADV_PAGEOUT)
684*bb4ee6a4SAndroid Build Coastguard Worker             .map_err(|err| match err {
685*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::InvalidAddress => Error::new(EFAULT),
686*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::NotPageAligned => Error::new(EINVAL),
687*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::SystemCallFailed(e) => e,
688*bb4ee6a4SAndroid Build Coastguard Worker                 _ => Error::new(EIO),
689*bb4ee6a4SAndroid Build Coastguard Worker             })
690*bb4ee6a4SAndroid Build Coastguard Worker     }
691*bb4ee6a4SAndroid Build Coastguard Worker 
madvise_remove_memory_region( &mut self, slot: MemSlot, offset: usize, size: usize, ) -> Result<()>692*bb4ee6a4SAndroid Build Coastguard Worker     fn madvise_remove_memory_region(
693*bb4ee6a4SAndroid Build Coastguard Worker         &mut self,
694*bb4ee6a4SAndroid Build Coastguard Worker         slot: MemSlot,
695*bb4ee6a4SAndroid Build Coastguard Worker         offset: usize,
696*bb4ee6a4SAndroid Build Coastguard Worker         size: usize,
697*bb4ee6a4SAndroid Build Coastguard Worker     ) -> Result<()> {
698*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
699*bb4ee6a4SAndroid Build Coastguard Worker         let mem = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;
700*bb4ee6a4SAndroid Build Coastguard Worker 
701*bb4ee6a4SAndroid Build Coastguard Worker         mem.madvise(offset, size, libc::MADV_REMOVE)
702*bb4ee6a4SAndroid Build Coastguard Worker             .map_err(|err| match err {
703*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::InvalidAddress => Error::new(EFAULT),
704*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::NotPageAligned => Error::new(EINVAL),
705*bb4ee6a4SAndroid Build Coastguard Worker                 MmapError::SystemCallFailed(e) => e,
706*bb4ee6a4SAndroid Build Coastguard Worker                 _ => Error::new(EIO),
707*bb4ee6a4SAndroid Build Coastguard Worker             })
708*bb4ee6a4SAndroid Build Coastguard Worker     }
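
    // Editorial note (not part of the original source): the three region helpers
    // above all translate `MmapError` into an errno-style `base::Error` the same
    // way. A hypothetical shared helper could express that mapping once:
    //
    //     fn map_mmap_err(err: MmapError) -> Error {
    //         match err {
    //             MmapError::InvalidAddress => Error::new(EFAULT),
    //             MmapError::NotPageAligned => Error::new(EINVAL),
    //             MmapError::SystemCallFailed(e) => e,
    //             _ => Error::new(EIO),
    //         }
    //     }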
709*bb4ee6a4SAndroid Build Coastguard Worker 
710*bb4ee6a4SAndroid Build Coastguard Worker     fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
711*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
712*bb4ee6a4SAndroid Build Coastguard Worker         if !regions.contains_key(&slot) {
713*bb4ee6a4SAndroid Build Coastguard Worker             return Err(Error::new(ENOENT));
714*bb4ee6a4SAndroid Build Coastguard Worker         }
715*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
716*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because the slot is checked against the list of memory slots.
717*bb4ee6a4SAndroid Build Coastguard Worker         unsafe {
718*bb4ee6a4SAndroid Build Coastguard Worker             set_user_memory_region(
719*bb4ee6a4SAndroid Build Coastguard Worker                 &self.vm,
720*bb4ee6a4SAndroid Build Coastguard Worker                 slot,
721*bb4ee6a4SAndroid Build Coastguard Worker                 false,
722*bb4ee6a4SAndroid Build Coastguard Worker                 false,
723*bb4ee6a4SAndroid Build Coastguard Worker                 MemCacheType::CacheCoherent,
724*bb4ee6a4SAndroid Build Coastguard Worker                 0,
725*bb4ee6a4SAndroid Build Coastguard Worker                 0,
726*bb4ee6a4SAndroid Build Coastguard Worker                 std::ptr::null_mut(),
727*bb4ee6a4SAndroid Build Coastguard Worker             )?;
728*bb4ee6a4SAndroid Build Coastguard Worker         }
729*bb4ee6a4SAndroid Build Coastguard Worker         self.mem_slot_gaps.lock().push(Reverse(slot));
730*bb4ee6a4SAndroid Build Coastguard Worker         // This remove will always succeed because of the contains_key check above.
731*bb4ee6a4SAndroid Build Coastguard Worker         Ok(regions.remove(&slot).unwrap())
732*bb4ee6a4SAndroid Build Coastguard Worker     }
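
    // Editorial note (not part of the original source): pushing `Reverse(slot)`
    // into `mem_slot_gaps` (a `BinaryHeap`) turns the max-heap into a min-heap,
    // so the lowest freed slot number is reused first on the next allocation.
    // A minimal standard-library illustration:
    //
    //     use std::cmp::Reverse;
    //     use std::collections::BinaryHeap;
    //     let mut gaps = BinaryHeap::new();
    //     gaps.push(Reverse(5u32));
    //     gaps.push(Reverse(2u32));
    //     assert_eq!(gaps.pop(), Some(Reverse(2u32))); // smallest slot comes out first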
733*bb4ee6a4SAndroid Build Coastguard Worker 
734*bb4ee6a4SAndroid Build Coastguard Worker     fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor> {
735*bb4ee6a4SAndroid Build Coastguard Worker         let mut device = if let Some(dev) = self.get_device_params_arch(kind) {
736*bb4ee6a4SAndroid Build Coastguard Worker             dev
737*bb4ee6a4SAndroid Build Coastguard Worker         } else {
738*bb4ee6a4SAndroid Build Coastguard Worker             match kind {
739*bb4ee6a4SAndroid Build Coastguard Worker                 DeviceKind::Vfio => kvm_create_device {
740*bb4ee6a4SAndroid Build Coastguard Worker                     type_: kvm_device_type_KVM_DEV_TYPE_VFIO,
741*bb4ee6a4SAndroid Build Coastguard Worker                     fd: 0,
742*bb4ee6a4SAndroid Build Coastguard Worker                     flags: 0,
743*bb4ee6a4SAndroid Build Coastguard Worker                 },
744*bb4ee6a4SAndroid Build Coastguard Worker 
745*bb4ee6a4SAndroid Build Coastguard Worker                 // ARM and RISC-V have additional DeviceKinds, so the match needs a catch-all pattern.
746*bb4ee6a4SAndroid Build Coastguard Worker                 #[cfg(any(target_arch = "arm", target_arch = "aarch64", target_arch = "riscv64"))]
747*bb4ee6a4SAndroid Build Coastguard Worker                 _ => return Err(Error::new(libc::ENXIO)),
748*bb4ee6a4SAndroid Build Coastguard Worker             }
749*bb4ee6a4SAndroid Build Coastguard Worker         };
750*bb4ee6a4SAndroid Build Coastguard Worker 
751*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
752*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know that our file is a VM fd, we know the kernel will only write the
753*bb4ee6a4SAndroid Build Coastguard Worker         // correct amount of memory to our pointer, and we verify the return result.
754*bb4ee6a4SAndroid Build Coastguard Worker         let ret = unsafe { base::ioctl_with_mut_ref(self, KVM_CREATE_DEVICE, &mut device) };
755*bb4ee6a4SAndroid Build Coastguard Worker         if ret == 0 {
756*bb4ee6a4SAndroid Build Coastguard Worker             Ok(
757*bb4ee6a4SAndroid Build Coastguard Worker                 // SAFETY:
758*bb4ee6a4SAndroid Build Coastguard Worker                 // Safe because we verify that ret is valid and we own the fd.
759*bb4ee6a4SAndroid Build Coastguard Worker                 unsafe { SafeDescriptor::from_raw_descriptor(device.fd as i32) },
760*bb4ee6a4SAndroid Build Coastguard Worker             )
761*bb4ee6a4SAndroid Build Coastguard Worker         } else {
762*bb4ee6a4SAndroid Build Coastguard Worker             errno_result()
763*bb4ee6a4SAndroid Build Coastguard Worker         }
764*bb4ee6a4SAndroid Build Coastguard Worker     }
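
    // Editorial example (not part of the original source): a caller holding a
    // `KvmVm` named `vm` might create the in-kernel VFIO device roughly like
    // this; the variable names are hypothetical.
    //
    //     let vfio_dev: SafeDescriptor = vm.create_device(DeviceKind::Vfio)?;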
765*bb4ee6a4SAndroid Build Coastguard Worker 
766*bb4ee6a4SAndroid Build Coastguard Worker     fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()> {
767*bb4ee6a4SAndroid Build Coastguard Worker         let regions = self.mem_regions.lock();
768*bb4ee6a4SAndroid Build Coastguard Worker         let mmap = regions.get(&slot).ok_or_else(|| Error::new(ENOENT))?;
769*bb4ee6a4SAndroid Build Coastguard Worker         // Ensure dirty_log is large enough to hold the dirty bitmap (one bit per page) for the mmap.
770*bb4ee6a4SAndroid Build Coastguard Worker         if dirty_log_bitmap_size(mmap.size()) > dirty_log.len() {
771*bb4ee6a4SAndroid Build Coastguard Worker             return Err(Error::new(EINVAL));
772*bb4ee6a4SAndroid Build Coastguard Worker         }
773*bb4ee6a4SAndroid Build Coastguard Worker 
774*bb4ee6a4SAndroid Build Coastguard Worker         let mut dirty_log_kvm = kvm_dirty_log {
775*bb4ee6a4SAndroid Build Coastguard Worker             slot,
776*bb4ee6a4SAndroid Build Coastguard Worker             ..Default::default()
777*bb4ee6a4SAndroid Build Coastguard Worker         };
778*bb4ee6a4SAndroid Build Coastguard Worker         dirty_log_kvm.__bindgen_anon_1.dirty_bitmap = dirty_log.as_ptr() as *mut c_void;
779*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
780*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because the `dirty_bitmap` pointer assigned above is guaranteed to be valid (because
781*bb4ee6a4SAndroid Build Coastguard Worker         // it's from a slice) and we checked that it will be large enough to hold the entire log.
782*bb4ee6a4SAndroid Build Coastguard Worker         let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG, &dirty_log_kvm) };
783*bb4ee6a4SAndroid Build Coastguard Worker         if ret == 0 {
784*bb4ee6a4SAndroid Build Coastguard Worker             Ok(())
785*bb4ee6a4SAndroid Build Coastguard Worker         } else {
786*bb4ee6a4SAndroid Build Coastguard Worker             errno_result()
787*bb4ee6a4SAndroid Build Coastguard Worker         }
788*bb4ee6a4SAndroid Build Coastguard Worker     }
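
    // Editorial note (not part of the original source): KVM's dirty log records
    // one bit per guest page, so a region of `size` bytes needs roughly
    // `size / pagesize() / 8` bytes of bitmap (rounded up). The real sizing is
    // done by `dirty_log_bitmap_size` in the length check above; the expression
    // here only illustrates the shape of that calculation.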
789*bb4ee6a4SAndroid Build Coastguard Worker 
790*bb4ee6a4SAndroid Build Coastguard Worker     fn register_ioevent(
791*bb4ee6a4SAndroid Build Coastguard Worker         &mut self,
792*bb4ee6a4SAndroid Build Coastguard Worker         evt: &Event,
793*bb4ee6a4SAndroid Build Coastguard Worker         addr: IoEventAddress,
794*bb4ee6a4SAndroid Build Coastguard Worker         datamatch: Datamatch,
795*bb4ee6a4SAndroid Build Coastguard Worker     ) -> Result<()> {
796*bb4ee6a4SAndroid Build Coastguard Worker         self.ioeventfd(evt, addr, datamatch, false)
797*bb4ee6a4SAndroid Build Coastguard Worker     }
798*bb4ee6a4SAndroid Build Coastguard Worker 
799*bb4ee6a4SAndroid Build Coastguard Worker     fn unregister_ioevent(
800*bb4ee6a4SAndroid Build Coastguard Worker         &mut self,
801*bb4ee6a4SAndroid Build Coastguard Worker         evt: &Event,
802*bb4ee6a4SAndroid Build Coastguard Worker         addr: IoEventAddress,
803*bb4ee6a4SAndroid Build Coastguard Worker         datamatch: Datamatch,
804*bb4ee6a4SAndroid Build Coastguard Worker     ) -> Result<()> {
805*bb4ee6a4SAndroid Build Coastguard Worker         self.ioeventfd(evt, addr, datamatch, true)
806*bb4ee6a4SAndroid Build Coastguard Worker     }
807*bb4ee6a4SAndroid Build Coastguard Worker 
808*bb4ee6a4SAndroid Build Coastguard Worker     fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
809*bb4ee6a4SAndroid Build Coastguard Worker         // KVM delivers IO events in-kernel with ioeventfds, so this is a no-op
810*bb4ee6a4SAndroid Build Coastguard Worker         Ok(())
811*bb4ee6a4SAndroid Build Coastguard Worker     }
812*bb4ee6a4SAndroid Build Coastguard Worker 
813*bb4ee6a4SAndroid Build Coastguard Worker     fn get_pvclock(&self) -> Result<ClockState> {
814*bb4ee6a4SAndroid Build Coastguard Worker         self.get_pvclock_arch()
815*bb4ee6a4SAndroid Build Coastguard Worker     }
816*bb4ee6a4SAndroid Build Coastguard Worker 
817*bb4ee6a4SAndroid Build Coastguard Worker     fn set_pvclock(&self, state: &ClockState) -> Result<()> {
818*bb4ee6a4SAndroid Build Coastguard Worker         self.set_pvclock_arch(state)
819*bb4ee6a4SAndroid Build Coastguard Worker     }
820*bb4ee6a4SAndroid Build Coastguard Worker 
821*bb4ee6a4SAndroid Build Coastguard Worker     fn add_fd_mapping(
822*bb4ee6a4SAndroid Build Coastguard Worker         &mut self,
823*bb4ee6a4SAndroid Build Coastguard Worker         slot: u32,
824*bb4ee6a4SAndroid Build Coastguard Worker         offset: usize,
825*bb4ee6a4SAndroid Build Coastguard Worker         size: usize,
826*bb4ee6a4SAndroid Build Coastguard Worker         fd: &dyn AsRawDescriptor,
827*bb4ee6a4SAndroid Build Coastguard Worker         fd_offset: u64,
828*bb4ee6a4SAndroid Build Coastguard Worker         prot: Protection,
829*bb4ee6a4SAndroid Build Coastguard Worker     ) -> Result<()> {
830*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
831*bb4ee6a4SAndroid Build Coastguard Worker         let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
832*bb4ee6a4SAndroid Build Coastguard Worker 
833*bb4ee6a4SAndroid Build Coastguard Worker         match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
834*bb4ee6a4SAndroid Build Coastguard Worker             Ok(()) => Ok(()),
835*bb4ee6a4SAndroid Build Coastguard Worker             Err(MmapError::SystemCallFailed(e)) => Err(e),
836*bb4ee6a4SAndroid Build Coastguard Worker             Err(_) => Err(Error::new(EIO)),
837*bb4ee6a4SAndroid Build Coastguard Worker         }
838*bb4ee6a4SAndroid Build Coastguard Worker     }
839*bb4ee6a4SAndroid Build Coastguard Worker 
840*bb4ee6a4SAndroid Build Coastguard Worker     fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
841*bb4ee6a4SAndroid Build Coastguard Worker         let mut regions = self.mem_regions.lock();
842*bb4ee6a4SAndroid Build Coastguard Worker         let region = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;
843*bb4ee6a4SAndroid Build Coastguard Worker 
844*bb4ee6a4SAndroid Build Coastguard Worker         match region.remove_mapping(offset, size) {
845*bb4ee6a4SAndroid Build Coastguard Worker             Ok(()) => Ok(()),
846*bb4ee6a4SAndroid Build Coastguard Worker             Err(MmapError::SystemCallFailed(e)) => Err(e),
847*bb4ee6a4SAndroid Build Coastguard Worker             Err(_) => Err(Error::new(EIO)),
848*bb4ee6a4SAndroid Build Coastguard Worker         }
849*bb4ee6a4SAndroid Build Coastguard Worker     }
850*bb4ee6a4SAndroid Build Coastguard Worker 
851*bb4ee6a4SAndroid Build Coastguard Worker     fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()> {
852*bb4ee6a4SAndroid Build Coastguard Worker         match event {
853*bb4ee6a4SAndroid Build Coastguard Worker             BalloonEvent::Inflate(m) => self.handle_inflate(m.guest_address, m.size),
854*bb4ee6a4SAndroid Build Coastguard Worker             BalloonEvent::Deflate(m) => self.handle_deflate(m.guest_address, m.size),
855*bb4ee6a4SAndroid Build Coastguard Worker             BalloonEvent::BalloonTargetReached(_) => Ok(()),
856*bb4ee6a4SAndroid Build Coastguard Worker         }
857*bb4ee6a4SAndroid Build Coastguard Worker     }
858*bb4ee6a4SAndroid Build Coastguard Worker }
859*bb4ee6a4SAndroid Build Coastguard Worker 
860*bb4ee6a4SAndroid Build Coastguard Worker impl AsRawDescriptor for KvmVm {
861*bb4ee6a4SAndroid Build Coastguard Worker     fn as_raw_descriptor(&self) -> RawDescriptor {
862*bb4ee6a4SAndroid Build Coastguard Worker         self.vm.as_raw_descriptor()
863*bb4ee6a4SAndroid Build Coastguard Worker     }
864*bb4ee6a4SAndroid Build Coastguard Worker }
865*bb4ee6a4SAndroid Build Coastguard Worker 
866*bb4ee6a4SAndroid Build Coastguard Worker struct KvmVcpuSignalHandle {
867*bb4ee6a4SAndroid Build Coastguard Worker     run_mmap: Arc<MemoryMapping>,
868*bb4ee6a4SAndroid Build Coastguard Worker }
869*bb4ee6a4SAndroid Build Coastguard Worker 
870*bb4ee6a4SAndroid Build Coastguard Worker impl VcpuSignalHandleInner for KvmVcpuSignalHandle {
871*bb4ee6a4SAndroid Build Coastguard Worker     fn signal_immediate_exit(&self) {
872*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY: we ensure `run_mmap` is a valid mapping of `kvm_run` at creation time, and the
873*bb4ee6a4SAndroid Build Coastguard Worker         // `Arc` ensures the mapping still exists while we hold a reference to it.
874*bb4ee6a4SAndroid Build Coastguard Worker         unsafe {
875*bb4ee6a4SAndroid Build Coastguard Worker             let run = self.run_mmap.as_ptr() as *mut kvm_run;
876*bb4ee6a4SAndroid Build Coastguard Worker             (*run).immediate_exit = 1;
877*bb4ee6a4SAndroid Build Coastguard Worker         }
878*bb4ee6a4SAndroid Build Coastguard Worker     }
879*bb4ee6a4SAndroid Build Coastguard Worker }
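
// Editorial note (not part of the original source): `KvmVcpuSignalHandle` lets
// another thread (for example, a signal handler used to kick a vCPU out of a
// blocked KVM_RUN) flip `immediate_exit` without holding the `KvmVcpu` itself;
// cloning the `Arc<MemoryMapping>` keeps the kvm_run mapping alive for as long
// as the handle does. A hypothetical usage, assuming the `VcpuSignalHandle`
// wrapper forwards to `signal_immediate_exit`:
//
//     let handle = vcpu.signal_handle();
//     // ...later, from another thread or a signal context:
//     handle.signal_immediate_exit();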
880*bb4ee6a4SAndroid Build Coastguard Worker 
881*bb4ee6a4SAndroid Build Coastguard Worker /// A wrapper around using a KVM Vcpu.
882*bb4ee6a4SAndroid Build Coastguard Worker pub struct KvmVcpu {
883*bb4ee6a4SAndroid Build Coastguard Worker     kvm: Kvm,
884*bb4ee6a4SAndroid Build Coastguard Worker     vm: SafeDescriptor,
885*bb4ee6a4SAndroid Build Coastguard Worker     vcpu: File,
886*bb4ee6a4SAndroid Build Coastguard Worker     id: usize,
887*bb4ee6a4SAndroid Build Coastguard Worker     cap_kvmclock_ctrl: bool,
888*bb4ee6a4SAndroid Build Coastguard Worker     run_mmap: Arc<MemoryMapping>,
889*bb4ee6a4SAndroid Build Coastguard Worker }
890*bb4ee6a4SAndroid Build Coastguard Worker 
891*bb4ee6a4SAndroid Build Coastguard Worker impl Vcpu for KvmVcpu {
892*bb4ee6a4SAndroid Build Coastguard Worker     fn try_clone(&self) -> Result<Self> {
893*bb4ee6a4SAndroid Build Coastguard Worker         let vm = self.vm.try_clone()?;
894*bb4ee6a4SAndroid Build Coastguard Worker         let vcpu = self.vcpu.try_clone()?;
895*bb4ee6a4SAndroid Build Coastguard Worker 
896*bb4ee6a4SAndroid Build Coastguard Worker         Ok(KvmVcpu {
897*bb4ee6a4SAndroid Build Coastguard Worker             kvm: self.kvm.try_clone()?,
898*bb4ee6a4SAndroid Build Coastguard Worker             vm,
899*bb4ee6a4SAndroid Build Coastguard Worker             vcpu,
900*bb4ee6a4SAndroid Build Coastguard Worker             cap_kvmclock_ctrl: self.cap_kvmclock_ctrl,
901*bb4ee6a4SAndroid Build Coastguard Worker             id: self.id,
902*bb4ee6a4SAndroid Build Coastguard Worker             run_mmap: self.run_mmap.clone(),
903*bb4ee6a4SAndroid Build Coastguard Worker         })
904*bb4ee6a4SAndroid Build Coastguard Worker     }
905*bb4ee6a4SAndroid Build Coastguard Worker 
906*bb4ee6a4SAndroid Build Coastguard Worker     fn as_vcpu(&self) -> &dyn Vcpu {
907*bb4ee6a4SAndroid Build Coastguard Worker         self
908*bb4ee6a4SAndroid Build Coastguard Worker     }
909*bb4ee6a4SAndroid Build Coastguard Worker 
910*bb4ee6a4SAndroid Build Coastguard Worker     fn id(&self) -> usize {
911*bb4ee6a4SAndroid Build Coastguard Worker         self.id
912*bb4ee6a4SAndroid Build Coastguard Worker     }
913*bb4ee6a4SAndroid Build Coastguard Worker 
914*bb4ee6a4SAndroid Build Coastguard Worker     #[allow(clippy::cast_ptr_alignment)]
915*bb4ee6a4SAndroid Build Coastguard Worker     fn set_immediate_exit(&self, exit: bool) {
916*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
917*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know we mapped enough memory to hold the kvm_run struct because the
918*bb4ee6a4SAndroid Build Coastguard Worker         // kernel told us how large it was. The pointer is page aligned so casting to a different
919*bb4ee6a4SAndroid Build Coastguard Worker         // type is well defined, hence the clippy allow attribute.
920*bb4ee6a4SAndroid Build Coastguard Worker         let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
921*bb4ee6a4SAndroid Build Coastguard Worker         run.immediate_exit = exit.into();
922*bb4ee6a4SAndroid Build Coastguard Worker     }
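
    // Editorial note (not part of the original source): per the KVM API,
    // `immediate_exit` is polled when KVM_RUN starts, and a non-zero value makes
    // it return to userspace right away with EINTR; this is how a vCPU is kicked
    // out of guest mode. A hypothetical caller:
    //
    //     vcpu.set_immediate_exit(true);
    //     // The next `vcpu.run()` now returns promptly instead of blocking.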
923*bb4ee6a4SAndroid Build Coastguard Worker 
924*bb4ee6a4SAndroid Build Coastguard Worker     fn signal_handle(&self) -> VcpuSignalHandle {
925*bb4ee6a4SAndroid Build Coastguard Worker         VcpuSignalHandle {
926*bb4ee6a4SAndroid Build Coastguard Worker             inner: Box::new(KvmVcpuSignalHandle {
927*bb4ee6a4SAndroid Build Coastguard Worker                 run_mmap: self.run_mmap.clone(),
928*bb4ee6a4SAndroid Build Coastguard Worker             }),
929*bb4ee6a4SAndroid Build Coastguard Worker         }
930*bb4ee6a4SAndroid Build Coastguard Worker     }
931*bb4ee6a4SAndroid Build Coastguard Worker 
932*bb4ee6a4SAndroid Build Coastguard Worker     fn on_suspend(&self) -> Result<()> {
933*bb4ee6a4SAndroid Build Coastguard Worker         // On KVM implementations that use a paravirtualized clock (e.g. x86), a flag must be set to
934*bb4ee6a4SAndroid Build Coastguard Worker         // indicate to the guest kernel that a vCPU was suspended. The guest kernel will use this
935*bb4ee6a4SAndroid Build Coastguard Worker         // flag to prevent its soft-lockup detection from triggering when this vCPU resumes, which
936*bb4ee6a4SAndroid Build Coastguard Worker         // could happen days later in real time.
937*bb4ee6a4SAndroid Build Coastguard Worker         if self.cap_kvmclock_ctrl {
938*bb4ee6a4SAndroid Build Coastguard Worker             // SAFETY:
939*bb4ee6a4SAndroid Build Coastguard Worker             // The ioctl is safe because it does not read or write memory in this process.
940*bb4ee6a4SAndroid Build Coastguard Worker             if unsafe { ioctl(self, KVM_KVMCLOCK_CTRL) } != 0 {
941*bb4ee6a4SAndroid Build Coastguard Worker                 // Even if the host kernel supports the capability, it may not be configured by
942*bb4ee6a4SAndroid Build Coastguard Worker                 // the guest - for example, when the guest kernel offlines a CPU.
943*bb4ee6a4SAndroid Build Coastguard Worker                 if Error::last().errno() != libc::EINVAL {
944*bb4ee6a4SAndroid Build Coastguard Worker                     return errno_result();
945*bb4ee6a4SAndroid Build Coastguard Worker                 }
946*bb4ee6a4SAndroid Build Coastguard Worker             }
947*bb4ee6a4SAndroid Build Coastguard Worker         }
948*bb4ee6a4SAndroid Build Coastguard Worker 
949*bb4ee6a4SAndroid Build Coastguard Worker         Ok(())
950*bb4ee6a4SAndroid Build Coastguard Worker     }
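
    // Editorial example (not part of the original source): a suspend path might
    // notify every vCPU before parking its threads so that the guest's
    // pvclock-based soft-lockup detection stays quiet across the gap; `vcpus`
    // is hypothetical.
    //
    //     for vcpu in &vcpus {
    //         vcpu.on_suspend()?;
    //     }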
951*bb4ee6a4SAndroid Build Coastguard Worker 
952*bb4ee6a4SAndroid Build Coastguard Worker     unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()> {
953*bb4ee6a4SAndroid Build Coastguard Worker         let kvm_cap = kvm_enable_cap {
954*bb4ee6a4SAndroid Build Coastguard Worker             cap,
955*bb4ee6a4SAndroid Build Coastguard Worker             args: *args,
956*bb4ee6a4SAndroid Build Coastguard Worker             ..Default::default()
957*bb4ee6a4SAndroid Build Coastguard Worker         };
958*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
959*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we allocated the struct and we know the kernel will read exactly the size of
960*bb4ee6a4SAndroid Build Coastguard Worker         // the struct, and because we assume the caller has allocated the args appropriately.
961*bb4ee6a4SAndroid Build Coastguard Worker         let ret = ioctl_with_ref(self, KVM_ENABLE_CAP, &kvm_cap);
962*bb4ee6a4SAndroid Build Coastguard Worker         if ret == 0 {
963*bb4ee6a4SAndroid Build Coastguard Worker             Ok(())
964*bb4ee6a4SAndroid Build Coastguard Worker         } else {
965*bb4ee6a4SAndroid Build Coastguard Worker             errno_result()
966*bb4ee6a4SAndroid Build Coastguard Worker         }
967*bb4ee6a4SAndroid Build Coastguard Worker     }
968*bb4ee6a4SAndroid Build Coastguard Worker 
969*bb4ee6a4SAndroid Build Coastguard Worker     #[allow(clippy::cast_ptr_alignment)]
970*bb4ee6a4SAndroid Build Coastguard Worker     // The pointer is page aligned so casting to a different type is well defined, hence the clippy
971*bb4ee6a4SAndroid Build Coastguard Worker     // allow attribute.
972*bb4ee6a4SAndroid Build Coastguard Worker     fn run(&mut self) -> Result<VcpuExit> {
973*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
974*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know that our file is a VCPU fd and we verify the return result.
975*bb4ee6a4SAndroid Build Coastguard Worker         let ret = unsafe { ioctl(self, KVM_RUN) };
976*bb4ee6a4SAndroid Build Coastguard Worker         if ret != 0 {
977*bb4ee6a4SAndroid Build Coastguard Worker             return errno_result();
978*bb4ee6a4SAndroid Build Coastguard Worker         }
979*bb4ee6a4SAndroid Build Coastguard Worker 
980*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
981*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know we mapped enough memory to hold the kvm_run struct because the
982*bb4ee6a4SAndroid Build Coastguard Worker         // kernel told us how large it was.
983*bb4ee6a4SAndroid Build Coastguard Worker         let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
984*bb4ee6a4SAndroid Build Coastguard Worker 
985*bb4ee6a4SAndroid Build Coastguard Worker         // Check for architecture-specific VM exit reasons first in case the architecture wants to
986*bb4ee6a4SAndroid Build Coastguard Worker         // override the default handling.
987*bb4ee6a4SAndroid Build Coastguard Worker         if let Some(vcpu_exit) = self.handle_vm_exit_arch(run) {
988*bb4ee6a4SAndroid Build Coastguard Worker             return Ok(vcpu_exit);
989*bb4ee6a4SAndroid Build Coastguard Worker         }
990*bb4ee6a4SAndroid Build Coastguard Worker 
991*bb4ee6a4SAndroid Build Coastguard Worker         match run.exit_reason {
992*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_MMIO => Ok(VcpuExit::Mmio),
993*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
994*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),
995*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
996*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen),
997*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown(Ok(()))),
998*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_FAIL_ENTRY => {
999*bb4ee6a4SAndroid Build Coastguard Worker                 // SAFETY:
1000*bb4ee6a4SAndroid Build Coastguard Worker                 // Safe because the exit_reason (which comes from the kernel) told us which
1001*bb4ee6a4SAndroid Build Coastguard Worker                 // union field to use.
1002*bb4ee6a4SAndroid Build Coastguard Worker                 let hardware_entry_failure_reason = unsafe {
1003*bb4ee6a4SAndroid Build Coastguard Worker                     run.__bindgen_anon_1
1004*bb4ee6a4SAndroid Build Coastguard Worker                         .fail_entry
1005*bb4ee6a4SAndroid Build Coastguard Worker                         .hardware_entry_failure_reason
1006*bb4ee6a4SAndroid Build Coastguard Worker                 };
1007*bb4ee6a4SAndroid Build Coastguard Worker                 Ok(VcpuExit::FailEntry {
1008*bb4ee6a4SAndroid Build Coastguard Worker                     hardware_entry_failure_reason,
1009*bb4ee6a4SAndroid Build Coastguard Worker                 })
1010*bb4ee6a4SAndroid Build Coastguard Worker             }
1011*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_INTR => Ok(VcpuExit::Intr),
1012*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
1013*bb4ee6a4SAndroid Build Coastguard Worker             KVM_EXIT_SYSTEM_EVENT => {
1014*bb4ee6a4SAndroid Build Coastguard Worker                 // SAFETY:
1015*bb4ee6a4SAndroid Build Coastguard Worker                 // Safe because we know the exit reason told us this union
1016*bb4ee6a4SAndroid Build Coastguard Worker                 // field is valid
1017*bb4ee6a4SAndroid Build Coastguard Worker                 let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
1018*bb4ee6a4SAndroid Build Coastguard Worker                 let event_flags =
1019*bb4ee6a4SAndroid Build Coastguard Worker                     // SAFETY:
1020*bb4ee6a4SAndroid Build Coastguard Worker                     // Safe because we know the exit reason told us this union
1021*bb4ee6a4SAndroid Build Coastguard Worker                     // field is valid
1022*bb4ee6a4SAndroid Build Coastguard Worker                     unsafe { run.__bindgen_anon_1.system_event.__bindgen_anon_1.flags };
1023*bb4ee6a4SAndroid Build Coastguard Worker                 match event_type {
1024*bb4ee6a4SAndroid Build Coastguard Worker                     KVM_SYSTEM_EVENT_SHUTDOWN => Ok(VcpuExit::SystemEventShutdown),
1025*bb4ee6a4SAndroid Build Coastguard Worker                     KVM_SYSTEM_EVENT_RESET => self.system_event_reset(event_flags),
1026*bb4ee6a4SAndroid Build Coastguard Worker                     KVM_SYSTEM_EVENT_CRASH => Ok(VcpuExit::SystemEventCrash),
1027*bb4ee6a4SAndroid Build Coastguard Worker                     _ => {
1028*bb4ee6a4SAndroid Build Coastguard Worker                         error!(
1029*bb4ee6a4SAndroid Build Coastguard Worker                             "Unknown KVM system event {} with flags {}",
1030*bb4ee6a4SAndroid Build Coastguard Worker                             event_type, event_flags
1031*bb4ee6a4SAndroid Build Coastguard Worker                         );
1032*bb4ee6a4SAndroid Build Coastguard Worker                         Err(Error::new(EINVAL))
1033*bb4ee6a4SAndroid Build Coastguard Worker                     }
1034*bb4ee6a4SAndroid Build Coastguard Worker                 }
1035*bb4ee6a4SAndroid Build Coastguard Worker             }
1036*bb4ee6a4SAndroid Build Coastguard Worker             r => panic!("unknown kvm exit reason: {}", r),
1037*bb4ee6a4SAndroid Build Coastguard Worker         }
1038*bb4ee6a4SAndroid Build Coastguard Worker     }
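
    // Editorial example (not part of the original source): a minimal sketch of a
    // vCPU thread driving `run()` and dispatching the exits decoded above; the
    // `mmio_handler` closure and `handle_other_exit` helper are hypothetical.
    //
    //     loop {
    //         match vcpu.run() {
    //             Ok(VcpuExit::Mmio) => vcpu.handle_mmio(&mut mmio_handler)?,
    //             Ok(VcpuExit::Intr) => { /* interrupted; check for a pending exit request */ }
    //             Ok(other) => handle_other_exit(other)?,
    //             Err(e) if e.errno() == libc::EINTR => continue,
    //             Err(e) => return Err(e),
    //         }
    //     }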
1039*bb4ee6a4SAndroid Build Coastguard Worker 
1040*bb4ee6a4SAndroid Build Coastguard Worker     fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()> {
1041*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
1042*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know we mapped enough memory to hold the kvm_run struct because the
1043*bb4ee6a4SAndroid Build Coastguard Worker         // kernel told us how large it was.
1044*bb4ee6a4SAndroid Build Coastguard Worker         let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
1045*bb4ee6a4SAndroid Build Coastguard Worker         // Verify that the handler is called in the right context.
1046*bb4ee6a4SAndroid Build Coastguard Worker         assert!(run.exit_reason == KVM_EXIT_MMIO);
1047*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
1048*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because the exit_reason (which comes from the kernel) told us which
1049*bb4ee6a4SAndroid Build Coastguard Worker         // union field to use.
1050*bb4ee6a4SAndroid Build Coastguard Worker         let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
1051*bb4ee6a4SAndroid Build Coastguard Worker         let address = mmio.phys_addr;
1052*bb4ee6a4SAndroid Build Coastguard Worker         let data = &mut mmio.data[..mmio.len as usize];
1053*bb4ee6a4SAndroid Build Coastguard Worker         if mmio.is_write != 0 {
1054*bb4ee6a4SAndroid Build Coastguard Worker             handle_fn(IoParams {
1055*bb4ee6a4SAndroid Build Coastguard Worker                 address,
1056*bb4ee6a4SAndroid Build Coastguard Worker                 operation: IoOperation::Write(data),
1057*bb4ee6a4SAndroid Build Coastguard Worker             })
1058*bb4ee6a4SAndroid Build Coastguard Worker         } else {
1059*bb4ee6a4SAndroid Build Coastguard Worker             handle_fn(IoParams {
1060*bb4ee6a4SAndroid Build Coastguard Worker                 address,
1061*bb4ee6a4SAndroid Build Coastguard Worker                 operation: IoOperation::Read(data),
1062*bb4ee6a4SAndroid Build Coastguard Worker             })
1063*bb4ee6a4SAndroid Build Coastguard Worker         }
1064*bb4ee6a4SAndroid Build Coastguard Worker     }
1065*bb4ee6a4SAndroid Build Coastguard Worker 
1066*bb4ee6a4SAndroid Build Coastguard Worker     fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()> {
1067*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
1068*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because we know we mapped enough memory to hold the kvm_run struct because the
1069*bb4ee6a4SAndroid Build Coastguard Worker         // kernel told us how large it was.
1070*bb4ee6a4SAndroid Build Coastguard Worker         let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
1071*bb4ee6a4SAndroid Build Coastguard Worker         // Verify that the handler is called in the right context.
1072*bb4ee6a4SAndroid Build Coastguard Worker         assert!(run.exit_reason == KVM_EXIT_IO);
1073*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
1074*bb4ee6a4SAndroid Build Coastguard Worker         // Safe because the exit_reason (which comes from the kernel) told us which
1075*bb4ee6a4SAndroid Build Coastguard Worker         // union field to use.
1076*bb4ee6a4SAndroid Build Coastguard Worker         let io = unsafe { run.__bindgen_anon_1.io };
1077*bb4ee6a4SAndroid Build Coastguard Worker         let address = u64::from(io.port);
1078*bb4ee6a4SAndroid Build Coastguard Worker         let size = usize::from(io.size);
1079*bb4ee6a4SAndroid Build Coastguard Worker         let count = io.count as usize;
1080*bb4ee6a4SAndroid Build Coastguard Worker         let data_len = count * size;
1081*bb4ee6a4SAndroid Build Coastguard Worker         let data_offset = io.data_offset as usize;
1082*bb4ee6a4SAndroid Build Coastguard Worker         assert!(data_offset + data_len <= self.run_mmap.size());
1083*bb4ee6a4SAndroid Build Coastguard Worker 
1084*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY:
1085*bb4ee6a4SAndroid Build Coastguard Worker         // The data_offset is defined by the kernel to be some number of bytes into the kvm_run
1086*bb4ee6a4SAndroid Build Coastguard Worker         // structure, which we have fully mmap'd.
1087*bb4ee6a4SAndroid Build Coastguard Worker         let buffer: &mut [u8] = unsafe {
1088*bb4ee6a4SAndroid Build Coastguard Worker             std::slice::from_raw_parts_mut(
1089*bb4ee6a4SAndroid Build Coastguard Worker                 (run as *mut kvm_run as *mut u8).add(data_offset),
1090*bb4ee6a4SAndroid Build Coastguard Worker                 data_len,
1091*bb4ee6a4SAndroid Build Coastguard Worker             )
1092*bb4ee6a4SAndroid Build Coastguard Worker         };
1093*bb4ee6a4SAndroid Build Coastguard Worker         let data_chunks = buffer.chunks_mut(size);
1094*bb4ee6a4SAndroid Build Coastguard Worker 
1095*bb4ee6a4SAndroid Build Coastguard Worker         if io.direction == KVM_EXIT_IO_IN as u8 {
1096*bb4ee6a4SAndroid Build Coastguard Worker             for data in data_chunks {
1097*bb4ee6a4SAndroid Build Coastguard Worker                 handle_fn(IoParams {
1098*bb4ee6a4SAndroid Build Coastguard Worker                     address,
1099*bb4ee6a4SAndroid Build Coastguard Worker                     operation: IoOperation::Read(data),
1100*bb4ee6a4SAndroid Build Coastguard Worker                 });
1101*bb4ee6a4SAndroid Build Coastguard Worker             }
1102*bb4ee6a4SAndroid Build Coastguard Worker         } else {
1103*bb4ee6a4SAndroid Build Coastguard Worker             debug_assert_eq!(io.direction, KVM_EXIT_IO_OUT as u8);
1104*bb4ee6a4SAndroid Build Coastguard Worker             for data in data_chunks {
1105*bb4ee6a4SAndroid Build Coastguard Worker                 handle_fn(IoParams {
1106*bb4ee6a4SAndroid Build Coastguard Worker                     address,
1107*bb4ee6a4SAndroid Build Coastguard Worker                     operation: IoOperation::Write(data),
1108*bb4ee6a4SAndroid Build Coastguard Worker                 });
1109*bb4ee6a4SAndroid Build Coastguard Worker             }
1110*bb4ee6a4SAndroid Build Coastguard Worker         }
1111*bb4ee6a4SAndroid Build Coastguard Worker 
1112*bb4ee6a4SAndroid Build Coastguard Worker         Ok(())
1113*bb4ee6a4SAndroid Build Coastguard Worker     }
1114*bb4ee6a4SAndroid Build Coastguard Worker }
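
// Editorial example (not part of the original source): `handle_io` above splits
// the kvm_run data area into `io.count` chunks of `io.size` bytes, one per
// repetition of a string I/O instruction. This standalone sketch reproduces
// that chunking on a plain buffer; the module and test names are hypothetical.
#[cfg(test)]
mod pio_chunking_example {
    #[test]
    fn buffer_splits_into_count_chunks_of_size_bytes() {
        let size = 2usize; // io.size: bytes per access
        let count = 3usize; // io.count: repetitions of the instruction
        let mut buffer = vec![0u8; count * size];
        // `chunks_mut(size)` yields exactly `count` mutable slices of `size`
        // bytes, mirroring the per-access dispatch to `handle_fn` in `handle_io`.
        assert_eq!(buffer.chunks_mut(size).count(), count);
        assert!(buffer.chunks_mut(size).all(|chunk| chunk.len() == size));
    }
}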
1115*bb4ee6a4SAndroid Build Coastguard Worker 
1116*bb4ee6a4SAndroid Build Coastguard Worker impl KvmVcpu {
1117*bb4ee6a4SAndroid Build Coastguard Worker     /// Gets the vcpu's current "multiprocessing state".
1118*bb4ee6a4SAndroid Build Coastguard Worker     ///
1119*bb4ee6a4SAndroid Build Coastguard Worker     /// See the documentation for KVM_GET_MP_STATE. This call can only succeed after
1120*bb4ee6a4SAndroid Build Coastguard Worker     /// a call to `Vm::create_irq_chip`.
1121*bb4ee6a4SAndroid Build Coastguard Worker     ///
1122*bb4ee6a4SAndroid Build Coastguard Worker     /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
1123*bb4ee6a4SAndroid Build Coastguard Worker     /// to run crosvm on s390.
1124*bb4ee6a4SAndroid Build Coastguard Worker     pub fn get_mp_state(&self) -> Result<kvm_mp_state> {
1125*bb4ee6a4SAndroid Build Coastguard Worker         // SAFETY: trivially safe
1126*bb4ee6a4SAndroid Build Coastguard Worker         let mut state: kvm_mp_state = unsafe { std::mem::zeroed() };
1127*bb4ee6a4SAndroid Build Coastguard Worker         let ret = {
1128*bb4ee6a4SAndroid Build Coastguard Worker             // SAFETY:
1129*bb4ee6a4SAndroid Build Coastguard Worker             // Safe because we know that our file is a VCPU fd, we know the kernel will only write
1130*bb4ee6a4SAndroid Build Coastguard Worker             // the correct amount of memory to our pointer, and we verify the return
1131*bb4ee6a4SAndroid Build Coastguard Worker             // result.
1132*bb4ee6a4SAndroid Build Coastguard Worker             unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE, &mut state) }
1133*bb4ee6a4SAndroid Build Coastguard Worker         };
1134*bb4ee6a4SAndroid Build Coastguard Worker         if ret < 0 {
1135*bb4ee6a4SAndroid Build Coastguard Worker             return errno_result();
1136*bb4ee6a4SAndroid Build Coastguard Worker         }
1137*bb4ee6a4SAndroid Build Coastguard Worker         Ok(state)
1138*bb4ee6a4SAndroid Build Coastguard Worker     }
1139*bb4ee6a4SAndroid Build Coastguard Worker 
1140*bb4ee6a4SAndroid Build Coastguard Worker     /// Sets the vcpu's current "multiprocessing state".
1141*bb4ee6a4SAndroid Build Coastguard Worker     ///
1142*bb4ee6a4SAndroid Build Coastguard Worker     /// See the documentation for KVM_SET_MP_STATE. This call can only succeed after
1143*bb4ee6a4SAndroid Build Coastguard Worker     /// a call to `Vm::create_irq_chip`.
1144*bb4ee6a4SAndroid Build Coastguard Worker     ///
1145*bb4ee6a4SAndroid Build Coastguard Worker     /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
1146*bb4ee6a4SAndroid Build Coastguard Worker     /// to run crosvm on s390.
1147*bb4ee6a4SAndroid Build Coastguard Worker     pub fn set_mp_state(&self, state: &kvm_mp_state) -> Result<()> {
1148*bb4ee6a4SAndroid Build Coastguard Worker         let ret = {
1149*bb4ee6a4SAndroid Build Coastguard Worker             // SAFETY:
1150*bb4ee6a4SAndroid Build Coastguard Worker             // The ioctl is safe because the kernel will only read from the kvm_mp_state struct.
1151*bb4ee6a4SAndroid Build Coastguard Worker             unsafe { ioctl_with_ref(self, KVM_SET_MP_STATE, state) }
1152*bb4ee6a4SAndroid Build Coastguard Worker         };
1153*bb4ee6a4SAndroid Build Coastguard Worker         if ret < 0 {
1154*bb4ee6a4SAndroid Build Coastguard Worker             return errno_result();
1155*bb4ee6a4SAndroid Build Coastguard Worker         }
1156*bb4ee6a4SAndroid Build Coastguard Worker         Ok(())
1157*bb4ee6a4SAndroid Build Coastguard Worker     }
1158*bb4ee6a4SAndroid Build Coastguard Worker }
1159*bb4ee6a4SAndroid Build Coastguard Worker 
1160*bb4ee6a4SAndroid Build Coastguard Worker impl AsRawDescriptor for KvmVcpu {
1161*bb4ee6a4SAndroid Build Coastguard Worker     fn as_raw_descriptor(&self) -> RawDescriptor {
1162*bb4ee6a4SAndroid Build Coastguard Worker         self.vcpu.as_raw_descriptor()
1163*bb4ee6a4SAndroid Build Coastguard Worker     }
1164*bb4ee6a4SAndroid Build Coastguard Worker }
1165*bb4ee6a4SAndroid Build Coastguard Worker 
1166*bb4ee6a4SAndroid Build Coastguard Worker impl TryFrom<HypervisorCap> for KvmCap {
1167*bb4ee6a4SAndroid Build Coastguard Worker     type Error = Error;
1168*bb4ee6a4SAndroid Build Coastguard Worker 
1169*bb4ee6a4SAndroid Build Coastguard Worker     fn try_from(cap: HypervisorCap) -> Result<KvmCap> {
1170*bb4ee6a4SAndroid Build Coastguard Worker         match cap {
1171*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::ArmPmuV3 => Ok(KvmCap::ArmPmuV3),
1172*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::ImmediateExit => Ok(KvmCap::ImmediateExit),
1173*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::S390UserSigp => Ok(KvmCap::S390UserSigp),
1174*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::TscDeadlineTimer => Ok(KvmCap::TscDeadlineTimer),
1175*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::UserMemory => Ok(KvmCap::UserMemory),
1176*bb4ee6a4SAndroid Build Coastguard Worker             #[cfg(target_arch = "x86_64")]
1177*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::Xcrs => Ok(KvmCap::Xcrs),
1178*bb4ee6a4SAndroid Build Coastguard Worker             #[cfg(target_arch = "x86_64")]
1179*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::CalibratedTscLeafRequired => Err(Error::new(libc::EINVAL)),
1180*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::StaticSwiotlbAllocationRequired => Err(Error::new(libc::EINVAL)),
1181*bb4ee6a4SAndroid Build Coastguard Worker             HypervisorCap::HypervisorInitializedBootContext => Err(Error::new(libc::EINVAL)),
1182*bb4ee6a4SAndroid Build Coastguard Worker         }
1183*bb4ee6a4SAndroid Build Coastguard Worker     }
1184*bb4ee6a4SAndroid Build Coastguard Worker }
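
// Editorial example (not part of the original source): callers can use this
// conversion to check whether a generic `HypervisorCap` even has a KVM
// equivalent before asking the kernel about it, e.g.:
//
//     if let Ok(kvm_cap) = KvmCap::try_from(HypervisorCap::ImmediateExit) {
//         // `kvm_cap` can now be probed with KVM_CHECK_EXTENSION.
//     }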
1185*bb4ee6a4SAndroid Build Coastguard Worker 
1186*bb4ee6a4SAndroid Build Coastguard Worker impl From<&IrqRoute> for kvm_irq_routing_entry {
1187*bb4ee6a4SAndroid Build Coastguard Worker     fn from(item: &IrqRoute) -> Self {
1188*bb4ee6a4SAndroid Build Coastguard Worker         match &item.source {
1189*bb4ee6a4SAndroid Build Coastguard Worker             IrqSource::Irqchip { chip, pin } => kvm_irq_routing_entry {
1190*bb4ee6a4SAndroid Build Coastguard Worker                 gsi: item.gsi,
1191*bb4ee6a4SAndroid Build Coastguard Worker                 type_: KVM_IRQ_ROUTING_IRQCHIP,
1192*bb4ee6a4SAndroid Build Coastguard Worker                 u: kvm_irq_routing_entry__bindgen_ty_1 {
1193*bb4ee6a4SAndroid Build Coastguard Worker                     irqchip: kvm_irq_routing_irqchip {
1194*bb4ee6a4SAndroid Build Coastguard Worker                         irqchip: chip_to_kvm_chip(*chip),
1195*bb4ee6a4SAndroid Build Coastguard Worker                         pin: *pin,
1196*bb4ee6a4SAndroid Build Coastguard Worker                     },
1197*bb4ee6a4SAndroid Build Coastguard Worker                 },
1198*bb4ee6a4SAndroid Build Coastguard Worker                 ..Default::default()
1199*bb4ee6a4SAndroid Build Coastguard Worker             },
1200*bb4ee6a4SAndroid Build Coastguard Worker             IrqSource::Msi { address, data } => kvm_irq_routing_entry {
1201*bb4ee6a4SAndroid Build Coastguard Worker                 gsi: item.gsi,
1202*bb4ee6a4SAndroid Build Coastguard Worker                 type_: KVM_IRQ_ROUTING_MSI,
1203*bb4ee6a4SAndroid Build Coastguard Worker                 u: kvm_irq_routing_entry__bindgen_ty_1 {
1204*bb4ee6a4SAndroid Build Coastguard Worker                     msi: kvm_irq_routing_msi {
1205*bb4ee6a4SAndroid Build Coastguard Worker                         address_lo: *address as u32,
1206*bb4ee6a4SAndroid Build Coastguard Worker                         address_hi: (*address >> 32) as u32,
1207*bb4ee6a4SAndroid Build Coastguard Worker                         data: *data,
1208*bb4ee6a4SAndroid Build Coastguard Worker                         ..Default::default()
1209*bb4ee6a4SAndroid Build Coastguard Worker                     },
1210*bb4ee6a4SAndroid Build Coastguard Worker                 },
1211*bb4ee6a4SAndroid Build Coastguard Worker                 ..Default::default()
1212*bb4ee6a4SAndroid Build Coastguard Worker             },
1213*bb4ee6a4SAndroid Build Coastguard Worker         }
1214*bb4ee6a4SAndroid Build Coastguard Worker     }
1215*bb4ee6a4SAndroid Build Coastguard Worker }
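
// Editorial note (not part of the original source): for MSI routes the 64-bit
// message address is split into the 32-bit `address_lo`/`address_hi` halves
// that the kernel ABI expects; the split is a plain truncate-and-shift:
//
//     let address: u64 = 0x1234_5678_9abc_def0;
//     assert_eq!(address as u32, 0x9abc_def0);         // address_lo
//     assert_eq!((address >> 32) as u32, 0x1234_5678); // address_hi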
1216*bb4ee6a4SAndroid Build Coastguard Worker 
1217*bb4ee6a4SAndroid Build Coastguard Worker impl From<&kvm_mp_state> for MPState {
1218*bb4ee6a4SAndroid Build Coastguard Worker     fn from(item: &kvm_mp_state) -> Self {
1219*bb4ee6a4SAndroid Build Coastguard Worker         match item.mp_state {
1220*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_RUNNABLE => MPState::Runnable,
1221*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_UNINITIALIZED => MPState::Uninitialized,
1222*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_INIT_RECEIVED => MPState::InitReceived,
1223*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_HALTED => MPState::Halted,
1224*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_SIPI_RECEIVED => MPState::SipiReceived,
1225*bb4ee6a4SAndroid Build Coastguard Worker             KVM_MP_STATE_STOPPED => MPState::Stopped,
1226*bb4ee6a4SAndroid Build Coastguard Worker             state => {
1227*bb4ee6a4SAndroid Build Coastguard Worker                 error!(
1228*bb4ee6a4SAndroid Build Coastguard Worker                     "unrecognized kvm_mp_state {}, setting to KVM_MP_STATE_RUNNABLE",
1229*bb4ee6a4SAndroid Build Coastguard Worker                     state
1230*bb4ee6a4SAndroid Build Coastguard Worker                 );
1231*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::Runnable
1232*bb4ee6a4SAndroid Build Coastguard Worker             }
1233*bb4ee6a4SAndroid Build Coastguard Worker         }
1234*bb4ee6a4SAndroid Build Coastguard Worker     }
1235*bb4ee6a4SAndroid Build Coastguard Worker }
1236*bb4ee6a4SAndroid Build Coastguard Worker 
1237*bb4ee6a4SAndroid Build Coastguard Worker impl From<&MPState> for kvm_mp_state {
1238*bb4ee6a4SAndroid Build Coastguard Worker     fn from(item: &MPState) -> Self {
1239*bb4ee6a4SAndroid Build Coastguard Worker         kvm_mp_state {
1240*bb4ee6a4SAndroid Build Coastguard Worker             mp_state: match item {
1241*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::Runnable => KVM_MP_STATE_RUNNABLE,
1242*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::Uninitialized => KVM_MP_STATE_UNINITIALIZED,
1243*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::InitReceived => KVM_MP_STATE_INIT_RECEIVED,
1244*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::Halted => KVM_MP_STATE_HALTED,
1245*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::SipiReceived => KVM_MP_STATE_SIPI_RECEIVED,
1246*bb4ee6a4SAndroid Build Coastguard Worker                 MPState::Stopped => KVM_MP_STATE_STOPPED,
1247*bb4ee6a4SAndroid Build Coastguard Worker             },
1248*bb4ee6a4SAndroid Build Coastguard Worker         }
1249*bb4ee6a4SAndroid Build Coastguard Worker     }
1250*bb4ee6a4SAndroid Build Coastguard Worker }
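
// Editorial example (not part of the original source): a minimal round-trip
// check for the MPState <-> kvm_mp_state conversions defined above; the module
// and test names are hypothetical.
#[cfg(test)]
mod mp_state_conversion_example {
    use super::*;

    #[test]
    fn mp_state_round_trips_through_kvm_mp_state() {
        // Convert the hypervisor-agnostic state into the KVM representation...
        let kvm_state = kvm_mp_state::from(&MPState::Halted);
        assert_eq!(kvm_state.mp_state, KVM_MP_STATE_HALTED);
        // ...and back again.
        assert!(matches!(MPState::from(&kvm_state), MPState::Halted));
    }
}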
1251